bitkeeper revision 1.392 (3f3244ecABjhViAc-zlxaKkbsstr4Q)
author: kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>
Thu, 7 Aug 2003 12:24:12 +0000 (12:24 +0000)
committer: kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>
Thu, 7 Aug 2003 12:24:12 +0000 (12:24 +0000)
Many files:
  Upgraded our aacraid driver to latest Linux-ac version (Linux 2.4.21-ac4 with aacraid v1.1.2).

12 files changed:
xen/drivers/scsi/aacraid/Makefile
xen/drivers/scsi/aacraid/README
xen/drivers/scsi/aacraid/TODO
xen/drivers/scsi/aacraid/aachba.c
xen/drivers/scsi/aacraid/aacraid.h
xen/drivers/scsi/aacraid/commctrl.c
xen/drivers/scsi/aacraid/comminit.c
xen/drivers/scsi/aacraid/commsup.c
xen/drivers/scsi/aacraid/dpcsup.c
xen/drivers/scsi/aacraid/linit.c
xen/drivers/scsi/aacraid/rx.c
xen/drivers/scsi/aacraid/sa.c

index 7d802c3bc9cf93bb7d72899cfb39a6e65006b6a2..b7235fca4b41659bc27fc148cac53382552de5e0 100644 (file)
@@ -3,10 +3,6 @@ include $(BASEDIR)/Rules.mk
 
 CFLAGS += -I$(BASEDIR)/drivers/scsi
 
-
-# -y           := linit.o aachba.o commctrl.o comminit.o commsup.o \
-#                 dpcsup.o rx.o sa.o
-
 default: $(OBJS)
        $(LD) -r -o aacraid.o $(OBJS)
 
index 9f73c6719b2080309c899b0aadd0160d884cb1e5..617258fdc8a23d9c7275bc2e3eb4443ea023ed91 100644 (file)
@@ -18,6 +18,12 @@ Supported Cards/Chipsets
        ADAPTEC 2120S
        ADAPTEC 2200S
        ADAPTEC 5400S
+       Legend S220
+       Legend S230
+       Adaptec 3230S
+       Adaptec 3240S
+       ASR-2020S PCI-X
+       AAR-2410SA SATA
 
 People
 -------------------------
@@ -28,15 +34,22 @@ Deanna Bonds <deanna_bonds@adaptec.com> (non-DASD support, PAE fibs and 64 bit,
                                         added new ioctls, changed scsi interface to use new error handler,
                                         increased the number of fibs and outstanding commands to a container)
 
+                                       (fixed 64bit and 64G memory model, changed confusing naming convention
+                                        where fibs that go to the hardware are consistently called hw_fibs and
+                                        not just fibs like the name of the driver tracking structure)
+Mark Salyzyn <Mark_Salyzyn@adaptec.com> Fixed panic issues and added some new product ids for upcoming hbas.
+
 Original Driver
 -------------------------
 Adaptec Unix OEM Product Group
 
 Mailing List
 -------------------------
-None currently. Also note this is very different to Brian's original driver
+linux-aacraid-devel@dell.com (Interested parties troll here)
+http://mbserver.adaptec.com/ (Currently more Community Support than Devel Support)
+Also note this is very different to Brian's original driver
 so don't expect him to support it.
-Adaptec does support this driver.  Contact either tech support or deanna bonds.
+Adaptec does support this driver.  Contact either tech support or Mark Salyzyn.
 
 Original by Brian Boerner February 2001
 Rewritten by Alan Cox, November 2001
index 6f710224134c6c57cd35b692f996f5574ef23860..f9ac6ac884e913b29158cfc5d5d266b7d755057b 100644 (file)
@@ -2,3 +2,4 @@ o       Testing
 o      More testing
 o      Feature request: display the firmware/bios/etc revisions in the
        /proc info
+o      2.5.0 and beyond.
index 198e37cace5392a6dbd80eb6a84cac3b62ae7107..91153460e896fc387f1901eb5d95c69eeb4219f2 100644 (file)
  *
  */
 
-#include <xeno/config.h>
-/*  #include <xeno/kernel.h> */
-#include <xeno/init.h>
-#include <xeno/sched.h>
-#include <xeno/pci.h>
-/*  #include <xeno/spinlock.h> */
-/*  #include <xeno/slab.h> */
-/*  #include <xeno/completion.h> */
-/*  #include <asm/semaphore.h> */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+/*#include <linux/completion.h>*/
+/*#include <asm/semaphore.h>*/
 #include <asm/uaccess.h>
 #define MAJOR_NR SCSI_DISK0_MAJOR      /* For DEVICE_NR() */
 #include <linux/blk.h>
@@ -213,8 +215,7 @@ struct sense_data {
  *              M O D U L E   G L O B A L S
  */
  
-static struct fsa_scsi_hba *fsa_dev[MAXIMUM_NUM_ADAPTERS]; /*  SCSI Device 
-                                                              Instance Ptrs */
+static struct fsa_scsi_hba *fsa_dev[MAXIMUM_NUM_ADAPTERS];     /*  SCSI Device Instance Pointers */
 static struct sense_data sense_data[MAXIMUM_NUM_CONTAINERS];
 static void get_sd_devname(int disknum, char *buffer);
 static unsigned long aac_build_sg(Scsi_Cmnd* scsicmd, struct sgmap* sgmap);
@@ -224,6 +225,15 @@ static int aac_send_srb_fib(Scsi_Cmnd* scsicmd);
 static char *aac_get_status_string(u32 status);
 #endif
 
+/*
+ *     Non dasd selection is handled entirely in aachba now
+ */    
+MODULE_PARM(nondasd, "i");
+MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices. 0=off, 1=on");
+
+static int nondasd = -1;
+
 /**
  *     aac_get_containers      -       list containers
  *     @common: adapter to probe
@@ -232,57 +242,59 @@ static char *aac_get_status_string(u32 status);
  */
 int aac_get_containers(struct aac_dev *dev)
 {
-    struct fsa_scsi_hba *fsa_dev_ptr;
-    u32 index, status = 0;
-    struct aac_query_mount *dinfo;
-    struct aac_mount *dresp;
-    struct fib * fibptr;
-    unsigned instance;
-    
-    fsa_dev_ptr = &(dev->fsa_dev);
-    instance = dev->scsi_host_ptr->unique_id;
-    
-    if (!(fibptr = fib_alloc(dev)))
-       return -ENOMEM;
-    
-    for (index = 0; index < MAXIMUM_NUM_CONTAINERS; index++) {
-       fib_init(fibptr);
-       dinfo = (struct aac_query_mount *) fib_data(fibptr);
-       
-       dinfo->command = cpu_to_le32(VM_NameServe);
-       dinfo->count = cpu_to_le32(index);
-       dinfo->type = cpu_to_le32(FT_FILESYS);
+       struct fsa_scsi_hba *fsa_dev_ptr;
+       u32 index;
+       int status = 0;
+       struct aac_query_mount *dinfo;
+       struct aac_mount *dresp;
+       struct fib * fibptr;
+       unsigned instance;
 
-       status = fib_send(ContainerCommand,
-                         fibptr,
-                         sizeof (struct aac_query_mount),
-                         FsaNormal,
-                         1, 1,
-                         NULL, NULL);
-       if (status < 0 ) {
-           printk(KERN_WARNING "ProbeContainers: SendFIB failed.\n");
-           break;
-       }
-       dresp = (struct aac_mount *)fib_data(fibptr);
-       
-       if ((le32_to_cpu(dresp->status) == ST_OK) &&
-           (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) {
-           fsa_dev_ptr->valid[index] = 1;
-           fsa_dev_ptr->type[index] = le32_to_cpu(dresp->mnt[0].vol);
-           fsa_dev_ptr->size[index] = le32_to_cpu(dresp->mnt[0].capacity);
-           if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
-               fsa_dev_ptr->ro[index] = 1;
+       fsa_dev_ptr = &(dev->fsa_dev);
+       instance = dev->scsi_host_ptr->unique_id;
+
+       if (!(fibptr = fib_alloc(dev)))
+               return -ENOMEM;
+
+       for (index = 0; index < MAXIMUM_NUM_CONTAINERS; index++) {
+               fib_init(fibptr);
+               dinfo = (struct aac_query_mount *) fib_data(fibptr);
+
+               dinfo->command = cpu_to_le32(VM_NameServe);
+               dinfo->count = cpu_to_le32(index);
+               dinfo->type = cpu_to_le32(FT_FILESYS);
+
+               status = fib_send(ContainerCommand,
+                                   fibptr,
+                                   sizeof (struct aac_query_mount),
+                                   FsaNormal,
+                                   1, 1,
+                                   NULL, NULL);
+               if (status < 0 ) {
+                       printk(KERN_WARNING "aac_get_containers: SendFIB failed.\n");
+                       break;
+               }
+               dresp = (struct aac_mount *)fib_data(fibptr);
+
+               if ((le32_to_cpu(dresp->status) == ST_OK) &&
+                   (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
+                   (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
+                       fsa_dev_ptr->valid[index] = 1;
+                       fsa_dev_ptr->type[index] = le32_to_cpu(dresp->mnt[0].vol);
+                       fsa_dev_ptr->size[index] = le32_to_cpu(dresp->mnt[0].capacity);
+                       if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
+                                   fsa_dev_ptr->ro[index] = 1;
+               }
+               fib_complete(fibptr);
+               /*
+                *      If there are no more containers, then stop asking.
+                */
+               if ((index + 1) >= le32_to_cpu(dresp->count))
+                       break;
        }
-       fib_complete(fibptr);
-       /*
-        *      If there are no more containers, then stop asking.
-        */
-       if ((index + 1) >= le32_to_cpu(dresp->count))
-           break;
-    }
-    fib_free(fibptr);
-    fsa_dev[instance] = fsa_dev_ptr;
-    return status;
+       fib_free(fibptr);
+       fsa_dev[instance] = fsa_dev_ptr;
+       return status;
 }
 
 /**
@@ -296,54 +308,55 @@ int aac_get_containers(struct aac_dev *dev)
  
 static int probe_container(struct aac_dev *dev, int cid)
 {
-    struct fsa_scsi_hba *fsa_dev_ptr;
-    int status;
-    struct aac_query_mount *dinfo;
-    struct aac_mount *dresp;
-    struct fib * fibptr;
-    unsigned instance;
-    
-    fsa_dev_ptr = &(dev->fsa_dev);
-    instance = dev->scsi_host_ptr->unique_id;
-    
-    if (!(fibptr = fib_alloc(dev)))
-       return -ENOMEM;
-    
-    fib_init(fibptr);
-    
-    dinfo = (struct aac_query_mount *)fib_data(fibptr);
-    
-    dinfo->command = cpu_to_le32(VM_NameServe);
-    dinfo->count = cpu_to_le32(cid);
-    dinfo->type = cpu_to_le32(FT_FILESYS);
-    
-    status = fib_send(ContainerCommand,
-                     fibptr,
-                     sizeof(struct aac_query_mount),
-                     FsaNormal,
-                     1, 1,
-                     NULL, NULL);
-    if (status < 0) {
-       printk(KERN_WARNING "aacraid: probe_containers query failed.\n");
-       goto error;
-    }
-    
-    dresp = (struct aac_mount *) fib_data(fibptr);
-    
-    if ((le32_to_cpu(dresp->status) == ST_OK) &&
-       (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) {
-       fsa_dev_ptr->valid[cid] = 1;
-       fsa_dev_ptr->type[cid] = le32_to_cpu(dresp->mnt[0].vol);
-       fsa_dev_ptr->size[cid] = le32_to_cpu(dresp->mnt[0].capacity);
-       if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
-           fsa_dev_ptr->ro[cid] = 1;
-    }
-    
- error:
-    fib_complete(fibptr);
-    fib_free(fibptr);
-    
-    return status;
+       struct fsa_scsi_hba *fsa_dev_ptr;
+       int status;
+       struct aac_query_mount *dinfo;
+       struct aac_mount *dresp;
+       struct fib * fibptr;
+       unsigned instance;
+
+       fsa_dev_ptr = &(dev->fsa_dev);
+       instance = dev->scsi_host_ptr->unique_id;
+
+       if (!(fibptr = fib_alloc(dev)))
+               return -ENOMEM;
+
+       fib_init(fibptr);
+
+       dinfo = (struct aac_query_mount *)fib_data(fibptr);
+
+       dinfo->command = cpu_to_le32(VM_NameServe);
+       dinfo->count = cpu_to_le32(cid);
+       dinfo->type = cpu_to_le32(FT_FILESYS);
+
+       status = fib_send(ContainerCommand,
+                           fibptr,
+                           sizeof(struct aac_query_mount),
+                           FsaNormal,
+                           1, 1,
+                           NULL, NULL);
+       if (status < 0) {
+               printk(KERN_WARNING "aacraid: probe_containers query failed.\n");
+               goto error;
+       }
+
+       dresp = (struct aac_mount *) fib_data(fibptr);
+
+       if ((le32_to_cpu(dresp->status) == ST_OK) &&
+           (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
+           (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
+               fsa_dev_ptr->valid[cid] = 1;
+               fsa_dev_ptr->type[cid] = le32_to_cpu(dresp->mnt[0].vol);
+               fsa_dev_ptr->size[cid] = le32_to_cpu(dresp->mnt[0].capacity);
+               if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
+                       fsa_dev_ptr->ro[cid] = 1;
+       }
+
+error:
+       fib_complete(fibptr);
+       fib_free(fibptr);
+
+       return status;
 }
 
 /* Local Structure to set SCSI inquiry data strings */
@@ -482,7 +495,6 @@ int aac_get_adapter_info(struct aac_dev* dev)
        struct aac_adapter_info* info;
        int rcode;
        u32 tmp;
-
        if (!(fibptr = fib_alloc(dev)))
                return -ENOMEM;
 
@@ -520,24 +532,54 @@ int aac_get_adapter_info(struct aac_dev* dev)
                        dev->name, dev->id,
                        dev->adapter_info.serial[0],
                        dev->adapter_info.serial[1]);
-       dev->pae_support = 0;
+
        dev->nondasd_support = 0;
-       if( BITS_PER_LONG >= 64 && 
-         (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)){
-               printk(KERN_INFO "%s%d: 64 Bit PAE enabled\n", 
-                      dev->name, dev->id);
+       dev->raid_scsi_mode = 0;
+       if(dev->adapter_info.options & AAC_OPT_NONDASD){
+               dev->nondasd_support = 1;
+       }
+
+       /*
+        * If the firmware supports ROMB RAID/SCSI mode and we are currently
+        * in RAID/SCSI mode, set the flag. For now if in this mode we will
+        * force nondasd support on. If we decide to allow the non-dasd flag
+        * additional changes will have to be made to support
+        * RAID/SCSI.  The function aac_scsi_cmd in this module will have to be
+        * changed to support the new dev->raid_scsi_mode flag instead of
+        * leaching off of the dev->nondasd_support flag. Also in linit.c the
+        * function aac_detect will have to be modified where it sets up the
+        * max number of channels based on the aac->nondasd_support flag only.
+        */
+       if ((dev->adapter_info.options & AAC_OPT_SCSI_MANAGED)
+               && (dev->adapter_info.options & AAC_OPT_RAID_SCSI_MODE))
+       {
+               dev->nondasd_support = 1;
+               dev->raid_scsi_mode = 1;
+       }
+       if (dev->raid_scsi_mode != 0)
+               printk(KERN_INFO "%s%d: ROMB RAID/SCSI mode enabled\n",dev->name, dev->id);
+               
+       if (nondasd != -1)
+               dev->nondasd_support = (nondasd!=0);
+
+       if(dev->nondasd_support != 0)
+               printk(KERN_INFO "%s%d: Non-DASD support enabled\n",dev->name, dev->id);
+
+       dev->pae_support = 0;
+       if( (sizeof(dma_addr_t) > 4) && (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)){
                dev->pae_support = 1;
        }
        /* TODO - dmb temporary until fw can set this bit  */
        dev->pae_support = (BITS_PER_LONG >= 64);
-       if(dev->pae_support != 0) {
-               printk(KERN_INFO "%s%d: 64 Bit PAE enabled\n", 
-                      dev->name, dev->id);
+       if(dev->pae_support != 0) 
+       {
+               printk(KERN_INFO "%s%d: 64 Bit PAE enabled\n", dev->name, dev->id);
+               pci_set_dma_mask(dev->pdev, (dma_addr_t)0xFFFFFFFFFFFFFFFFULL);
        }
 
-       if(dev->adapter_info.options & AAC_OPT_NONDASD){
-               dev->nondasd_support = 1;
-       }
+       fib_complete(fibptr);
+       fib_free(fibptr);
+
        return rcode;
 }
 
@@ -556,7 +598,7 @@ static void read_callback(void *context, struct fib * fibptr)
        cid =TARGET_LUN_TO_CONTAINER(scsicmd->target, scsicmd->lun);
 
        lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
-       dprintk((KERN_DEBUG "read_callback[cpu %d]: lba = %d, t = %ld.\n", smp_processor_id(), lba, jiffies));
+       dprintk((KERN_DEBUG "read_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));
 
        if (fibptr == NULL)
                BUG();
@@ -601,7 +643,7 @@ static void write_callback(void *context, struct fib * fibptr)
        cid = TARGET_LUN_TO_CONTAINER(scsicmd->target, scsicmd->lun);
 
        lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
-       dprintk((KERN_DEBUG "write_callback[cpu %d]: lba = %d, t = %ld.\n", smp_processor_id(), lba, jiffies));
+       dprintk((KERN_DEBUG "write_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));
        if (fibptr == NULL)
                BUG();
 
@@ -687,8 +729,7 @@ int aac_read(Scsi_Cmnd * scsicmd, int cid)
                aac_build_sg64(scsicmd, &readcmd->sg);
                if(readcmd->sg.count > MAX_DRIVER_SG_SEGMENT_COUNT)
                        BUG();
-               fibsize = sizeof(struct aac_read64) + 
-                   ((readcmd->sg.count - 1) * sizeof (struct sgentry64));
+               fibsize = sizeof(struct aac_read64) + ((readcmd->sg.count - 1) * sizeof (struct sgentry64));
                /*
                 *      Now send the Fib to the adapter
                 */
@@ -713,8 +754,7 @@ int aac_read(Scsi_Cmnd * scsicmd, int cid)
                aac_build_sg(scsicmd, &readcmd->sg);
                if(readcmd->sg.count > MAX_DRIVER_SG_SEGMENT_COUNT)
                        BUG();
-               fibsize = sizeof(struct aac_read) + 
-                   ((readcmd->sg.count - 1) * sizeof (struct sgentry));
+               fibsize = sizeof(struct aac_read) + ((readcmd->sg.count - 1) * sizeof (struct sgentry));
                /*
                 *      Now send the Fib to the adapter
                 */
@@ -734,8 +774,7 @@ int aac_read(Scsi_Cmnd * scsicmd, int cid)
        if (status == -EINPROGRESS) 
                return 0;
                
-       printk(KERN_WARNING "aac_read: fib_send failed with status: %d.\n", 
-              status);
+       printk(KERN_WARNING "aac_read: fib_send failed with status: %d.\n", status);
        /*
         *      For some reason, the Fib didn't queue, return QUEUE_FULL
         */
@@ -770,8 +809,7 @@ static int aac_write(Scsi_Cmnd * scsicmd, int cid)
                lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
                count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
        }
-       dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %u, t = %ld.\n", 
-                smp_processor_id(), lba, jiffies));
+       dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));
        /*
         *      Allocate and initialize a Fib then setup a BlockWrite command
         */
@@ -796,8 +834,7 @@ static int aac_write(Scsi_Cmnd * scsicmd, int cid)
                aac_build_sg64(scsicmd, &writecmd->sg);
                if(writecmd->sg.count > MAX_DRIVER_SG_SEGMENT_COUNT)
                        BUG();
-               fibsize = sizeof(struct aac_write64) + 
-                   ((writecmd->sg.count - 1) * sizeof (struct sgentry64));
+               fibsize = sizeof(struct aac_write64) + ((writecmd->sg.count - 1) * sizeof (struct sgentry64));
                /*
                 *      Now send the Fib to the adapter
                 */
@@ -825,8 +862,7 @@ static int aac_write(Scsi_Cmnd * scsicmd, int cid)
                aac_build_sg(scsicmd, &writecmd->sg);
                if(writecmd->sg.count > MAX_DRIVER_SG_SEGMENT_COUNT)
                        BUG();
-               fibsize = sizeof(struct aac_write) + 
-                   ((writecmd->sg.count - 1) * sizeof (struct sgentry));
+               fibsize = sizeof(struct aac_write) + ((writecmd->sg.count - 1) * sizeof (struct sgentry));
                /*
                 *      Now send the Fib to the adapter
                 */
@@ -869,358 +905,351 @@ static int aac_write(Scsi_Cmnd * scsicmd, int cid)
  
 int aac_scsi_cmd(Scsi_Cmnd * scsicmd)
 {
-    u32 cid = 0;
-    struct fsa_scsi_hba *fsa_dev_ptr;
-    int cardtype;
-    int ret;
-    struct aac_dev *dev = (struct aac_dev *)scsicmd->host->hostdata;
-    
-    cardtype = dev->cardtype;
-
-    fsa_dev_ptr = fsa_dev[scsicmd->host->unique_id];
-    
-    /*
-     * If the bus, target or lun is out of range, return fail
-     * Test does not apply to ID 16, the pseudo id for the controller
-     * itself.
-     */
-    if (scsicmd->target != scsicmd->host->this_id) {
-       if ((scsicmd->channel == 0) ){
-           if( (scsicmd->target >= AAC_MAX_TARGET) || (scsicmd->lun != 0)){ 
-               scsicmd->result = DID_NO_CONNECT << 16;
-               __aac_io_done(scsicmd);
-               return 0;
-           }
-           cid = TARGET_LUN_TO_CONTAINER(scsicmd->target, scsicmd->lun);
-           
-           /*
-            *  If the target container doesn't exist, it may have
-            *  been newly created
-            */
-           if (fsa_dev_ptr->valid[cid] == 0) {
-               switch (scsicmd->cmnd[0]) {
-               case SS_INQUIR:
-               case SS_RDCAP:
-               case SS_TEST:
-                   spin_unlock_irq(&io_request_lock);
-                   probe_container(dev, cid);
-                   spin_lock_irq(&io_request_lock);
-                   if (fsa_dev_ptr->valid[cid] == 0) {
-                       scsicmd->result = DID_NO_CONNECT << 16;
-                       __aac_io_done(scsicmd);
-                       return 0;
-                   }
-               default:
-                   break;
+       u32 cid = 0;
+       struct fsa_scsi_hba *fsa_dev_ptr;
+       int cardtype;
+       int ret;
+       struct aac_dev *dev = (struct aac_dev *)scsicmd->host->hostdata;
+       
+       cardtype = dev->cardtype;
+
+       fsa_dev_ptr = fsa_dev[scsicmd->host->unique_id];
+
+       /*
+        *      If the bus, target or lun is out of range, return fail
+        *      Test does not apply to ID 16, the pseudo id for the controller
+        *      itself.
+        */
+       if (scsicmd->target != scsicmd->host->this_id) {
+               if ((scsicmd->channel == 0) ){
+                       if( (scsicmd->target >= AAC_MAX_TARGET) || (scsicmd->lun != 0)){ 
+                               scsicmd->result = DID_NO_CONNECT << 16;
+                               __aac_io_done(scsicmd);
+                               return 0;
+                       }
+                       cid = TARGET_LUN_TO_CONTAINER(scsicmd->target, scsicmd->lun);
+
+                       /*
+                        *      If the target container doesn't exist, it may have
+                        *      been newly created
+                        */
+                       if (fsa_dev_ptr->valid[cid] == 0) {
+                               switch (scsicmd->cmnd[0]) {
+                               case SS_INQUIR:
+                               case SS_RDCAP:
+                               case SS_TEST:
+                                       spin_unlock_irq(&io_request_lock);
+                                       probe_container(dev, cid);
+                                       spin_lock_irq(&io_request_lock);
+                                       if (fsa_dev_ptr->valid[cid] == 0) {
+                                               scsicmd->result = DID_NO_CONNECT << 16;
+                                               __aac_io_done(scsicmd);
+                                               return 0;
+                                       }
+                               default:
+                                       break;
+                               }
+                       }
+                       /*
+                        *      If the target container still doesn't exist, 
+                        *      return failure
+                        */
+                       if (fsa_dev_ptr->valid[cid] == 0) {
+                               scsicmd->result = DID_BAD_TARGET << 16;
+                               __aac_io_done(scsicmd);
+                               return -1;
+                       }
+               } else {  /* check for physical non-dasd devices */
+                       if(dev->nondasd_support == 1){
+                               return aac_send_srb_fib(scsicmd);
+                       } else {
+                               scsicmd->result = DID_NO_CONNECT << 16;
+                               __aac_io_done(scsicmd);
+                               return 0;
+                       }
                }
-           }
-           /*
-            *  If the target container still doesn't exist, 
-            *  return failure
-            */
-           if (fsa_dev_ptr->valid[cid] == 0) {
-               scsicmd->result = DID_BAD_TARGET << 16;
+       }
+       /*
+        * else Command for the controller itself
+        */
+       else if ((scsicmd->cmnd[0] != SS_INQUIR) &&     /* only INQUIRY & TUR cmnd supported for controller */
+               (scsicmd->cmnd[0] != SS_TEST)) 
+       {
+               dprintk((KERN_WARNING "Only INQUIRY & TUR command supported for controller, rcvd = 0x%x.\n", scsicmd->cmnd[0]));
+               scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | CHECK_CONDITION;
+               set_sense((u8 *) &sense_data[cid],
+                           SENKEY_ILLEGAL,
+                           SENCODE_INVALID_COMMAND,
+                           ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
                __aac_io_done(scsicmd);
                return -1;
-                       }
-       } else {  /* check for physical non-dasd devices */
-           if(dev->nondasd_support == 1){
-               return aac_send_srb_fib(scsicmd);
-           } else {
-               scsicmd->result = DID_NO_CONNECT << 16;
+       }
+
+
+       /* Handle commands here that don't really require going out to the adapter */
+       switch (scsicmd->cmnd[0]) {
+       case SS_INQUIR:
+       {
+               struct inquiry_data *inq_data_ptr;
+
+               dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", scsicmd->target));
+               inq_data_ptr = (struct inquiry_data *)scsicmd->request_buffer;
+               memset(inq_data_ptr, 0, sizeof (struct inquiry_data));
+
+               inq_data_ptr->inqd_ver = 2;     /* claim compliance to SCSI-2 */
+               inq_data_ptr->inqd_dtq = 0x80;  /* set RMB bit to one indicating that the medium is removable */
+               inq_data_ptr->inqd_rdf = 2;     /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
+               inq_data_ptr->inqd_len = 31;
+               /*Format for "pad2" is  RelAdr | WBus32 | WBus16 |  Sync  | Linked |Reserved| CmdQue | SftRe */
+               inq_data_ptr->inqd_pad2= 0x32 ;  /*WBus16|Sync|CmdQue */
+               /*
+                *      Set the Vendor, Product, and Revision Level
+                *      see: <vendor>.c i.e. aac.c
+                */
+               setinqstr(cardtype, (void *) (inq_data_ptr->inqd_vid), fsa_dev_ptr->type[cid]);
+               if (scsicmd->target == scsicmd->host->this_id)
+                       inq_data_ptr->inqd_pdt = INQD_PDT_PROC; /* Processor device */
+               else
+                       inq_data_ptr->inqd_pdt = INQD_PDT_DA;   /* Direct/random access device */
+               scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
                __aac_io_done(scsicmd);
                return 0;
-           }
        }
-    }
-    /*
-     * else Command for the controller itself
-     */
-    else if ((scsicmd->cmnd[0] != SS_INQUIR) &&        
-            (scsicmd->cmnd[0] != SS_TEST)) 
-    {
-       /* only INQUIRY & TUR cmnd supported for controller */
-       dprintk((KERN_WARNING "Only INQUIRY & TUR command supported for "
-                "controller, rcvd = 0x%x.\n", scsicmd->cmnd[0]));
-       scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | 
-           CHECK_CONDITION;
-       set_sense((u8 *) &sense_data[cid],
-                 SENKEY_ILLEGAL,
-                 SENCODE_INVALID_COMMAND,
-                 ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
-       __aac_io_done(scsicmd);
-       return -1;
-    }
-    
-    
-    /* Handle commands here that don't require going out to the adapter */
-    switch (scsicmd->cmnd[0]) {
-    case SS_INQUIR:
-    {
-       struct inquiry_data *inq_data_ptr;
-       
-       dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", scsicmd->target));
-       inq_data_ptr = (struct inquiry_data *)scsicmd->request_buffer;
-       memset(inq_data_ptr, 0, sizeof (struct inquiry_data));
-       
-       inq_data_ptr->inqd_ver = 2;     /* claim compliance to SCSI-2 */
-       inq_data_ptr->inqd_dtq = 0x80;  /* set RMB bit to one indicating that the medium is removable */
-       inq_data_ptr->inqd_rdf = 2;     /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
-       inq_data_ptr->inqd_len = 31;
-       /*Format for "pad2" is  RelAdr | WBus32 | WBus16 |  Sync  | Linked |Reserved| CmdQue | SftRe */
-       inq_data_ptr->inqd_pad2= 0x32 ;  /*WBus16|Sync|CmdQue */
-       /*
-        *      Set the Vendor, Product, and Revision Level
-        *      see: <vendor>.c i.e. aac.c
-        */
-       setinqstr(cardtype, (void *) (inq_data_ptr->inqd_vid), fsa_dev_ptr->type[cid]);
-       if (scsicmd->target == scsicmd->host->this_id)
-           inq_data_ptr->inqd_pdt = INQD_PDT_PROC;     /* Processor device */
-       else
-           inq_data_ptr->inqd_pdt = INQD_PDT_DA;       /* Direct/random access device */
-       scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
-       __aac_io_done(scsicmd);
-       return 0;
-    }
-    case SS_RDCAP:
-    {
-       int capacity;
-       char *cp;
-       
-       dprintk((KERN_DEBUG "READ CAPACITY command.\n"));
-       capacity = fsa_dev_ptr->size[cid] - 1;
-       cp = scsicmd->request_buffer;
-       cp[0] = (capacity >> 24) & 0xff;
-       cp[1] = (capacity >> 16) & 0xff;
-       cp[2] = (capacity >> 8) & 0xff;
-       cp[3] = (capacity >> 0) & 0xff;
-       cp[4] = 0;
-       cp[5] = 0;
-       cp[6] = 2;
-       cp[7] = 0;
-       
-       scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
-       __aac_io_done(scsicmd);
-       
-       return 0;
-    }
-    
-    case SS_MODESEN:
-    {
-       char *mode_buf;
-       
-       dprintk((KERN_DEBUG "MODE SENSE command.\n"));
-       mode_buf = scsicmd->request_buffer;
-       mode_buf[0] = 0;  /* Mode data length (MSB) */
-       mode_buf[1] = 6;  /* Mode data length (LSB) */
-       mode_buf[2] = 0;  /* Medium type - default */
-       mode_buf[3] = 0;  /* Device-specific param, 
-                            bit 8: 0/1 = write enabled/protected */
-       mode_buf[4] = 0;  /* reserved */
-       mode_buf[5] = 0;  /* reserved */
-       mode_buf[6] = 0;  /* Block descriptor length (MSB) */
-       mode_buf[7] = 0;  /* Block descriptor length (LSB) */
-       
-       scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
-       __aac_io_done(scsicmd);
-       
-       return 0;
-    }
-    case SS_REQSEN:
-       dprintk((KERN_DEBUG "REQUEST SENSE command.\n"));
-       memcpy(scsicmd->sense_buffer, &sense_data[cid], 
-              sizeof (struct sense_data));
-       memset(&sense_data[cid], 0, sizeof (struct sense_data));
-       scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
-       __aac_io_done(scsicmd);
-       return (0);
-       
-    case SS_LOCK:
-       dprintk((KERN_DEBUG "LOCK command.\n"));
-       if (scsicmd->cmnd[4])
-           fsa_dev_ptr->locked[cid] = 1;
-       else
-           fsa_dev_ptr->locked[cid] = 0;
-       
-       scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
-       __aac_io_done(scsicmd);
-       return 0;
+       case SS_RDCAP:
+       {
+               int capacity;
+               char *cp;
+
+               dprintk((KERN_DEBUG "READ CAPACITY command.\n"));
+               capacity = fsa_dev_ptr->size[cid] - 1;
+               cp = scsicmd->request_buffer;
+               cp[0] = (capacity >> 24) & 0xff;
+               cp[1] = (capacity >> 16) & 0xff;
+               cp[2] = (capacity >> 8) & 0xff;
+               cp[3] = (capacity >> 0) & 0xff;
+               cp[4] = 0;
+               cp[5] = 0;
+               cp[6] = 2;
+               cp[7] = 0;
+
+               scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
+               __aac_io_done(scsicmd);
+
+               return 0;
+       }
+
+       case SS_MODESEN:
+       {
+               char *mode_buf;
+
+               dprintk((KERN_DEBUG "MODE SENSE command.\n"));
+               mode_buf = scsicmd->request_buffer;
+               mode_buf[0] = 0;        /* Mode data length (MSB) */
+               mode_buf[1] = 6;        /* Mode data length (LSB) */
+               mode_buf[2] = 0;        /* Medium type - default */
+               mode_buf[3] = 0;        /* Device-specific param, bit 8: 0/1 = write enabled/protected */
+               mode_buf[4] = 0;        /* reserved */
+               mode_buf[5] = 0;        /* reserved */
+               mode_buf[6] = 0;        /* Block descriptor length (MSB) */
+               mode_buf[7] = 0;        /* Block descriptor length (LSB) */
+
+               scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
+               __aac_io_done(scsicmd);
+
+               return 0;
+       }
+       case SS_REQSEN:
+               dprintk((KERN_DEBUG "REQUEST SENSE command.\n"));
+               memcpy(scsicmd->sense_buffer, &sense_data[cid], sizeof (struct sense_data));
+               memset(&sense_data[cid], 0, sizeof (struct sense_data));
+               scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
+               __aac_io_done(scsicmd);
+               return (0);
+
+       case SS_LOCK:
+               dprintk((KERN_DEBUG "LOCK command.\n"));
+               if (scsicmd->cmnd[4])
+                       fsa_dev_ptr->locked[cid] = 1;
+               else
+                       fsa_dev_ptr->locked[cid] = 0;
+
+               scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
+               __aac_io_done(scsicmd);
+               return 0;
        /*
         *      These commands are all No-Ops
         */
-    case SS_TEST:
-    case SS_RESERV:
-    case SS_RELES:
-    case SS_REZERO:
-    case SS_REASGN:
-    case SS_SEEK:
-    case SS_ST_SP:
-       scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
-       __aac_io_done(scsicmd);
-       return (0);
-    }
-    
-    switch (scsicmd->cmnd[0]) 
-    {
-    case SS_READ:
-    case SM_READ:
-       /*
-        *      Hack to keep track of ordinal number of the device that
-        *      corresponds to a container. Needed to convert
-        *      containers to /dev/sd device names
-        */
-       
-       spin_unlock_irq(&io_request_lock);
-       fsa_dev_ptr->devno[cid] = DEVICE_NR(scsicmd->request.rq_dev);
-       ret = aac_read(scsicmd, cid);
-       spin_lock_irq(&io_request_lock);
-       return ret;
-       
-    case SS_WRITE:
-    case SM_WRITE:
-       spin_unlock_irq(&io_request_lock);
-       ret = aac_write(scsicmd, cid);
-       spin_lock_irq(&io_request_lock);
-       return ret;
-    default:
-       /*
-        *      Unhandled commands
-        */
-       printk(KERN_WARNING "Unhandled SCSI Command: 0x%x.\n", 
-              scsicmd->cmnd[0]);
-       scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | 
-           CHECK_CONDITION;
-       set_sense((u8 *) &sense_data[cid],
-                 SENKEY_ILLEGAL, SENCODE_INVALID_COMMAND,
-                 ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
-       __aac_io_done(scsicmd);
-       return -1;
-    }
+       case SS_TEST:
+       case SS_RESERV:
+       case SS_RELES:
+       case SS_REZERO:
+       case SS_REASGN:
+       case SS_SEEK:
+       case SS_ST_SP:
+               scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
+               __aac_io_done(scsicmd);
+               return (0);
+       }
+
+       switch (scsicmd->cmnd[0]) 
+       {
+               case SS_READ:
+               case SM_READ:
+                       /*
+                        *      Hack to keep track of ordinal number of the device that
+                        *      corresponds to a container. Needed to convert
+                        *      containers to /dev/sd device names
+                        */
+                        
+                       spin_unlock_irq(&io_request_lock);
+                       fsa_dev_ptr->devno[cid] = DEVICE_NR(scsicmd->request.rq_dev);
+                       ret = aac_read(scsicmd, cid);
+                       spin_lock_irq(&io_request_lock);
+                       return ret;
+
+               case SS_WRITE:
+               case SM_WRITE:
+                       spin_unlock_irq(&io_request_lock);
+                       ret = aac_write(scsicmd, cid);
+                       spin_lock_irq(&io_request_lock);
+                       return ret;
+               default:
+                       /*
+                        *      Unhandled commands
+                        */
+                       printk(KERN_WARNING "Unhandled SCSI Command: 0x%x.\n", scsicmd->cmnd[0]);
+                       scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | CHECK_CONDITION;
+                       set_sense((u8 *) &sense_data[cid],
+                               SENKEY_ILLEGAL, SENCODE_INVALID_COMMAND,
+                       ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
+                       __aac_io_done(scsicmd);
+                       return -1;
+       }
 }
 
 static int query_disk(struct aac_dev *dev, void *arg)
 {
-    struct aac_query_disk qd;
-    struct fsa_scsi_hba *fsa_dev_ptr;
-    
-    fsa_dev_ptr = &(dev->fsa_dev);
-    if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk)))
-       return -EFAULT;
-    if (qd.cnum == -1)
-       qd.cnum = TARGET_LUN_TO_CONTAINER(qd.target, qd.lun);
-    else if ((qd.bus == -1) && (qd.target == -1) && (qd.lun == -1)) 
-    {
-       if (qd.cnum < 0 || qd.cnum > MAXIMUM_NUM_CONTAINERS)
-           return -EINVAL;
-       qd.instance = dev->scsi_host_ptr->host_no;
-       qd.bus = 0;
-       qd.target = CONTAINER_TO_TARGET(qd.cnum);
-       qd.lun = CONTAINER_TO_LUN(qd.cnum);
-    }
-    else return -EINVAL;
-    
-    qd.valid = fsa_dev_ptr->valid[qd.cnum];
-    qd.locked = fsa_dev_ptr->locked[qd.cnum];
-    qd.deleted = fsa_dev_ptr->deleted[qd.cnum];
-    
-    if (fsa_dev_ptr->devno[qd.cnum] == -1)
-       qd.unmapped = 1;
-    else
-       qd.unmapped = 0;
-    
-    get_sd_devname(fsa_dev_ptr->devno[qd.cnum], qd.name);
-    
-    if (copy_to_user(arg, &qd, sizeof (struct aac_query_disk)))
-       return -EFAULT;
-    return 0;
+       struct aac_query_disk qd;
+       struct fsa_scsi_hba *fsa_dev_ptr;
+
+       fsa_dev_ptr = &(dev->fsa_dev);
+       if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk)))
+               return -EFAULT;
+       if (qd.cnum == -1)
+               qd.cnum = TARGET_LUN_TO_CONTAINER(qd.target, qd.lun);
+       else if ((qd.bus == -1) && (qd.target == -1) && (qd.lun == -1)) 
+       {
+               if (qd.cnum < 0 || qd.cnum > MAXIMUM_NUM_CONTAINERS)
+                       return -EINVAL;
+               qd.instance = dev->scsi_host_ptr->host_no;
+               qd.bus = 0;
+               qd.target = CONTAINER_TO_TARGET(qd.cnum);
+               qd.lun = CONTAINER_TO_LUN(qd.cnum);
+       }
+       else return -EINVAL;
+
+       qd.valid = fsa_dev_ptr->valid[qd.cnum];
+       qd.locked = fsa_dev_ptr->locked[qd.cnum];
+       qd.deleted = fsa_dev_ptr->deleted[qd.cnum];
+
+       if (fsa_dev_ptr->devno[qd.cnum] == -1)
+               qd.unmapped = 1;
+       else
+               qd.unmapped = 0;
+
+       get_sd_devname(fsa_dev_ptr->devno[qd.cnum], qd.name);
+
+       if (copy_to_user(arg, &qd, sizeof (struct aac_query_disk)))
+               return -EFAULT;
+       return 0;
 }
 
 static void get_sd_devname(int disknum, char *buffer)
 {
-    if (disknum < 0) {
-       sprintf(buffer, "%s", "");
-       return;
-    }
-    
-    if (disknum < 26)
-       sprintf(buffer, "sd%c", 'a' + disknum);
-    else {
-       unsigned int min1;
-       unsigned int min2;
-       /*
-        * For larger numbers of disks, we need to go to a new
-        * naming scheme.
-        */
-       min1 = disknum / 26;
-       min2 = disknum % 26;
-       sprintf(buffer, "sd%c%c", 'a' + min1 - 1, 'a' + min2);
-    }
+       if (disknum < 0) {
+               sprintf(buffer, "%s", "");
+               return;
+       }
+
+       if (disknum < 26)
+               sprintf(buffer, "sd%c", 'a' + disknum);
+       else {
+               unsigned int min1;
+               unsigned int min2;
+               /*
+                * For larger numbers of disks, we need to go to a new
+                * naming scheme.
+                */
+               min1 = disknum / 26;
+               min2 = disknum % 26;
+               sprintf(buffer, "sd%c%c", 'a' + min1 - 1, 'a' + min2);
+       }
 }
 
 static int force_delete_disk(struct aac_dev *dev, void *arg)
 {
-    struct aac_delete_disk dd;
-    struct fsa_scsi_hba *fsa_dev_ptr;
-    
-    fsa_dev_ptr = &(dev->fsa_dev);
-    
-    if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
-       return -EFAULT;
-    
-    if (dd.cnum > MAXIMUM_NUM_CONTAINERS)
-       return -EINVAL;
-    /*
-     * Mark this container as being deleted.
-     */
-    fsa_dev_ptr->deleted[dd.cnum] = 1;
-    /*
-     * Mark the container as no longer valid
-     */
-    fsa_dev_ptr->valid[dd.cnum] = 0;
-    return 0;
+       struct aac_delete_disk dd;
+       struct fsa_scsi_hba *fsa_dev_ptr;
+
+       fsa_dev_ptr = &(dev->fsa_dev);
+
+       if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
+               return -EFAULT;
+
+       if (dd.cnum > MAXIMUM_NUM_CONTAINERS)
+               return -EINVAL;
+       /*
+        *      Mark this container as being deleted.
+        */
+       fsa_dev_ptr->deleted[dd.cnum] = 1;
+       /*
+        *      Mark the container as no longer valid
+        */
+       fsa_dev_ptr->valid[dd.cnum] = 0;
+       return 0;
 }
 
 static int delete_disk(struct aac_dev *dev, void *arg)
 {
-    struct aac_delete_disk dd;
-    struct fsa_scsi_hba *fsa_dev_ptr;
-
-    fsa_dev_ptr = &(dev->fsa_dev);
-
-    if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
-       return -EFAULT;
-
-    if (dd.cnum > MAXIMUM_NUM_CONTAINERS)
-       return -EINVAL;
-    /*
-     * If the container is locked, it can not be deleted by the API.
-     */
-    if (fsa_dev_ptr->locked[dd.cnum])
-       return -EBUSY;
-    else {
+       struct aac_delete_disk dd;
+       struct fsa_scsi_hba *fsa_dev_ptr;
+
+       fsa_dev_ptr = &(dev->fsa_dev);
+
+       if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
+               return -EFAULT;
+
+       if (dd.cnum > MAXIMUM_NUM_CONTAINERS)
+               return -EINVAL;
        /*
-        *      Mark the container as no longer being valid.
+        *      If the container is locked, it can not be deleted by the API.
         */
-       fsa_dev_ptr->valid[dd.cnum] = 0;
-       fsa_dev_ptr->devno[dd.cnum] = -1;
-       return 0;
-    }
+       if (fsa_dev_ptr->locked[dd.cnum])
+               return -EBUSY;
+       else {
+               /*
+                *      Mark the container as no longer being valid.
+                */
+               fsa_dev_ptr->valid[dd.cnum] = 0;
+               fsa_dev_ptr->devno[dd.cnum] = -1;
+               return 0;
+       }
 }
 
 int aac_dev_ioctl(struct aac_dev *dev, int cmd, void *arg)
 {
-    switch (cmd) {
-    case FSACTL_QUERY_DISK:
-       return query_disk(dev, arg);
-    case FSACTL_DELETE_DISK:
-       return delete_disk(dev, arg);
-    case FSACTL_FORCE_DELETE_DISK:
-       return force_delete_disk(dev, arg);
-    case 2131:
-       return aac_get_containers(dev);
-    default:
-       return -ENOTTY;
-    }
+       switch (cmd) {
+       case FSACTL_QUERY_DISK:
+               return query_disk(dev, arg);
+       case FSACTL_DELETE_DISK:
+               return delete_disk(dev, arg);
+       case FSACTL_FORCE_DELETE_DISK:
+               return force_delete_disk(dev, arg);
+       case 2131:
+               return aac_get_containers(dev);
+       default:
+               return -ENOTTY;
+       }
 }
 
 /**
@@ -1235,160 +1264,189 @@ int aac_dev_ioctl(struct aac_dev *dev, int cmd, void *arg)
 
 static void aac_srb_callback(void *context, struct fib * fibptr)
 {
-    struct aac_dev *dev;
-    struct aac_srb_reply *srbreply;
-    Scsi_Cmnd *scsicmd;
-
-    scsicmd = (Scsi_Cmnd *) context;
-    dev = (struct aac_dev *)scsicmd->host->hostdata;
-
-    if (fibptr == NULL)
-       BUG();
-
-    srbreply = (struct aac_srb_reply *) fib_data(fibptr);
-
-    scsicmd->sense_buffer[0] = '\0';  // initialize sense valid flag to false
-    // calculate resid for sg 
-    scsicmd->resid = scsicmd->request_bufflen - srbreply->data_xfer_length;
-
-    if(scsicmd->use_sg)
-       pci_unmap_sg(dev->pdev, 
-                    (struct scatterlist *)scsicmd->buffer,
-                    scsicmd->use_sg,
-                    scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
-    else if(scsicmd->request_bufflen)
-       pci_unmap_single(dev->pdev, (ulong)scsicmd->SCp.ptr, 
-                        scsicmd->request_bufflen,
-                        scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
-
-    /*
-     * First check the fib status
-     */
-
-    if (le32_to_cpu(srbreply->status) != ST_OK){
-       int len;
-       printk(KERN_WARNING "aac_srb_callback: srb failed, status = %d\n", 
-              le32_to_cpu(srbreply->status));
-       len = (srbreply->sense_data_size > sizeof(scsicmd->sense_buffer))?
-           sizeof(scsicmd->sense_buffer):srbreply->sense_data_size;
-       scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8 | 
-           CHECK_CONDITION;
-       memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
-    }
-
-    /*
-     * Next check the srb status
-     */
-    switch(le32_to_cpu(srbreply->srb_status)){
-    case SRB_STATUS_ERROR_RECOVERY:
-    case SRB_STATUS_PENDING:
-    case SRB_STATUS_SUCCESS:
-       if(scsicmd->cmnd[0] == INQUIRY ){
-           u8 b;
-           /* We can't expose disk devices because we can't tell whether they
-            * are the raw container drives or stand alone drives
-            */
-           b = *(u8*)scsicmd->buffer;
-           if( (b & 0x0f) == TYPE_DISK ){
-               scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
-           }
-       } else {
-           scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+       struct aac_dev *dev;
+       struct aac_srb_reply *srbreply;
+       Scsi_Cmnd *scsicmd;
+
+       scsicmd = (Scsi_Cmnd *) context;
+       dev = (struct aac_dev *)scsicmd->host->hostdata;
+
+       if (fibptr == NULL)
+               BUG();
+
+       srbreply = (struct aac_srb_reply *) fib_data(fibptr);
+
+       scsicmd->sense_buffer[0] = '\0';  // initialize sense valid flag to false
+       // calculate resid for sg 
+       scsicmd->resid = scsicmd->request_bufflen - srbreply->data_xfer_length;
+
+       if(scsicmd->use_sg)
+               pci_unmap_sg(dev->pdev, 
+                       (struct scatterlist *)scsicmd->buffer,
+                       scsicmd->use_sg,
+                       scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
+       else if(scsicmd->request_bufflen)
+               pci_unmap_single(dev->pdev, (ulong)scsicmd->SCp.ptr, scsicmd->request_bufflen,
+                       scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
+
+       /*
+        * First check the fib status
+        */
+
+       if (le32_to_cpu(srbreply->status) != ST_OK){
+               int len;
+               printk(KERN_WARNING "aac_srb_callback: srb failed, status = %d\n", le32_to_cpu(srbreply->status));
+               len = (srbreply->sense_data_size > sizeof(scsicmd->sense_buffer))?
+                               sizeof(scsicmd->sense_buffer):srbreply->sense_data_size;
+               scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8 | CHECK_CONDITION;
+               memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
        }
-       break;
-    case SRB_STATUS_DATA_OVERRUN:
-       switch(scsicmd->cmnd[0]){
-       case  READ_6:
-       case  WRITE_6:
-       case  READ_10:
-       case  WRITE_10:
-       case  READ_12:
-       case  WRITE_12:
-           if(le32_to_cpu(srbreply->data_xfer_length) < scsicmd->underflow ) {
-               printk(KERN_WARNING"aacraid: SCSI CMD underflow\n");
-           } else {
-               printk(KERN_WARNING"aacraid: SCSI CMD Data Overrun\n");
-           }
-           scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
-           break;
+
+       /*
+        * Next check the srb status
+        */
+       switch( (le32_to_cpu(srbreply->srb_status))&0x3f){
+       case SRB_STATUS_ERROR_RECOVERY:
+       case SRB_STATUS_PENDING:
+       case SRB_STATUS_SUCCESS:
+               if(scsicmd->cmnd[0] == INQUIRY ){
+                       u8 b;
+                       u8 b1;
+                       /* We can't expose disk devices because we can't tell whether they
+                        * are the raw container drives or stand alone drives.  If they have
+                        * the removable bit set then we should expose them though.
+                        */
+                       b = (*(u8*)scsicmd->buffer)&0x1f;
+                       b1 = ((u8*)scsicmd->buffer)[1];
+                       if( b==TYPE_TAPE || b==TYPE_WORM || b==TYPE_ROM || b==TYPE_MOD|| b==TYPE_MEDIUM_CHANGER 
+                                       || (b==TYPE_DISK && (b1&0x80)) ){
+                               scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+                       /*
+                        * We will allow disk devices if in RAID/SCSI mode and
+                        * the channel is 2
+                        */
+                       } else if((dev->raid_scsi_mode)&&(scsicmd->channel == 2)){
+                               scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+                       } else {
+                               scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
+                       }
+               } else {
+                       scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+               }
+               break;
+       case SRB_STATUS_DATA_OVERRUN:
+               switch(scsicmd->cmnd[0]){
+               case  READ_6:
+               case  WRITE_6:
+               case  READ_10:
+               case  WRITE_10:
+               case  READ_12:
+               case  WRITE_12:
+                       if(le32_to_cpu(srbreply->data_xfer_length) < scsicmd->underflow ) {
+                               printk(KERN_WARNING"aacraid: SCSI CMD underflow\n");
+                       } else {
+                               printk(KERN_WARNING"aacraid: SCSI CMD Data Overrun\n");
+                       }
+                       scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
+                       break;
+               case INQUIRY: {
+                       u8 b;
+                       u8 b1;
+                       /* We can't expose disk devices because we can't tell whether they
+                       * are the raw container drives or stand alone drives
+                       */
+                       b = (*(u8*)scsicmd->buffer)&0x0f;
+                       b1 = ((u8*)scsicmd->buffer)[1];
+                       if( b==TYPE_TAPE || b==TYPE_WORM || b==TYPE_ROM || b==TYPE_MOD|| b==TYPE_MEDIUM_CHANGER
+                                       || (b==TYPE_DISK && (b1&0x80)) ){
+                               scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+                       /*
+                        * We will allow disk devices if in RAID/SCSI mode and
+                        * the channel is 2
+                        */
+                       } else if((dev->raid_scsi_mode)&&(scsicmd->channel == 2)){
+                               scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+                       } else {
+                               scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
+                       }
+                       break;
+               }
+               default:
+                       scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+                       break;
+               }
+               break;
+       case SRB_STATUS_ABORTED:
+               scsicmd->result = DID_ABORT << 16 | ABORT << 8;
+               break;
+       case SRB_STATUS_ABORT_FAILED:
+               // Not sure about this one - but assuming the hba was trying to abort for some reason
+               scsicmd->result = DID_ERROR << 16 | ABORT << 8;
+               break;
+       case SRB_STATUS_PARITY_ERROR:
+               scsicmd->result = DID_PARITY << 16 | MSG_PARITY_ERROR << 8;
+               break;
+       case SRB_STATUS_NO_DEVICE:
+       case SRB_STATUS_INVALID_PATH_ID:
+       case SRB_STATUS_INVALID_TARGET_ID:
+       case SRB_STATUS_INVALID_LUN:
+       case SRB_STATUS_SELECTION_TIMEOUT:
+               scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
+               break;
+
+       case SRB_STATUS_COMMAND_TIMEOUT:
+       case SRB_STATUS_TIMEOUT:
+               scsicmd->result = DID_TIME_OUT << 16 | COMMAND_COMPLETE << 8;
+               break;
+
+       case SRB_STATUS_BUSY:
+               scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
+               break;
+
+       case SRB_STATUS_BUS_RESET:
+               scsicmd->result = DID_RESET << 16 | COMMAND_COMPLETE << 8;
+               break;
+
+       case SRB_STATUS_MESSAGE_REJECTED:
+               scsicmd->result = DID_ERROR << 16 | MESSAGE_REJECT << 8;
+               break;
+       case SRB_STATUS_REQUEST_FLUSHED:
+       case SRB_STATUS_ERROR:
+       case SRB_STATUS_INVALID_REQUEST:
+       case SRB_STATUS_REQUEST_SENSE_FAILED:
+       case SRB_STATUS_NO_HBA:
+       case SRB_STATUS_UNEXPECTED_BUS_FREE:
+       case SRB_STATUS_PHASE_SEQUENCE_FAILURE:
+       case SRB_STATUS_BAD_SRB_BLOCK_LENGTH:
+       case SRB_STATUS_DELAYED_RETRY:
+       case SRB_STATUS_BAD_FUNCTION:
+       case SRB_STATUS_NOT_STARTED:
+       case SRB_STATUS_NOT_IN_USE:
+       case SRB_STATUS_FORCE_ABORT:
+       case SRB_STATUS_DOMAIN_VALIDATION_FAIL:
        default:
-           scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
-           break;
-       }
-       break;
-    case SRB_STATUS_ABORTED:
-       scsicmd->result = DID_ABORT << 16 | ABORT << 8;
-       break;
-    case SRB_STATUS_ABORT_FAILED:
-       // Not sure about this one - but assuming the hba was trying 
-       // to abort for some reason
-       scsicmd->result = DID_ERROR << 16 | ABORT << 8;
-       break;
-    case SRB_STATUS_PARITY_ERROR:
-       scsicmd->result = DID_PARITY << 16 | MSG_PARITY_ERROR << 8;
-       break;
-    case SRB_STATUS_NO_DEVICE:
-    case SRB_STATUS_INVALID_PATH_ID:
-    case SRB_STATUS_INVALID_TARGET_ID:
-    case SRB_STATUS_INVALID_LUN:
-    case SRB_STATUS_SELECTION_TIMEOUT:
-       scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
-       break;
-
-    case SRB_STATUS_COMMAND_TIMEOUT:
-    case SRB_STATUS_TIMEOUT:
-       scsicmd->result = DID_TIME_OUT << 16 | COMMAND_COMPLETE << 8;
-       break;
-
-    case SRB_STATUS_BUSY:
-       scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
-       break;
-
-    case SRB_STATUS_BUS_RESET:
-       scsicmd->result = DID_RESET << 16 | COMMAND_COMPLETE << 8;
-       break;
-
-    case SRB_STATUS_MESSAGE_REJECTED:
-       scsicmd->result = DID_ERROR << 16 | MESSAGE_REJECT << 8;
-       break;
-    case SRB_STATUS_REQUEST_FLUSHED:
-    case SRB_STATUS_ERROR:
-    case SRB_STATUS_INVALID_REQUEST:
-    case SRB_STATUS_REQUEST_SENSE_FAILED:
-    case SRB_STATUS_NO_HBA:
-    case SRB_STATUS_UNEXPECTED_BUS_FREE:
-    case SRB_STATUS_PHASE_SEQUENCE_FAILURE:
-    case SRB_STATUS_BAD_SRB_BLOCK_LENGTH:
-    case SRB_STATUS_DELAYED_RETRY:
-    case SRB_STATUS_BAD_FUNCTION:
-    case SRB_STATUS_NOT_STARTED:
-    case SRB_STATUS_NOT_IN_USE:
-    case SRB_STATUS_FORCE_ABORT:
-    case SRB_STATUS_DOMAIN_VALIDATION_FAIL:
-    default:
 #ifdef AAC_DETAILED_STATUS_INFO
-       printk("aacraid: SRB ERROR (%s)\n", 
-              aac_get_status_string(le32_to_cpu(srbreply->srb_status)));
+               printk("aacraid: SRB ERROR(%u) %s scsi cmd 0x%x - scsi status 0x%x\n",le32_to_cpu(srbreply->srb_status&0x3f),aac_get_status_string(le32_to_cpu(srbreply->srb_status)), scsicmd->cmnd[0], le32_to_cpu(srbreply->scsi_status) );
 #endif
-       scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
-       break;
-    }
-    if (le32_to_cpu(srbreply->scsi_status) == 0x02 ){  // Check Condition
-       int len;
-       len = (srbreply->sense_data_size > sizeof(scsicmd->sense_buffer))?
-           sizeof(scsicmd->sense_buffer):srbreply->sense_data_size;
-       printk(KERN_WARNING "aac_srb_callback: check condition, "
-              "status = %d len=%d\n", le32_to_cpu(srbreply->status), len);
-       memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
-    }
-    /*
-     * OR in the scsi status (already shifted up a bit)
-     */
-    scsicmd->result |= le32_to_cpu(srbreply->scsi_status);
-
-    fib_complete(fibptr);
-    fib_free(fibptr);
-    aac_io_done(scsicmd);
+               scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
+               break;
+       }
+       if (le32_to_cpu(srbreply->scsi_status) == 0x02 ){  // Check Condition
+               int len;
+               scsicmd->result |= CHECK_CONDITION;
+               len = (srbreply->sense_data_size > sizeof(scsicmd->sense_buffer))?
+                               sizeof(scsicmd->sense_buffer):srbreply->sense_data_size;
+               printk(KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n", le32_to_cpu(srbreply->status), len);
+               memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
+       }
+       /*
+        * OR in the scsi status (already shifted up a bit)
+        */
+       scsicmd->result |= le32_to_cpu(srbreply->scsi_status);
+
+       fib_complete(fibptr);
+       fib_free(fibptr);
+       aac_io_done(scsicmd);
 }
 
 /**
@@ -1402,227 +1460,230 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
 
 static int aac_send_srb_fib(Scsi_Cmnd* scsicmd)
 {
-    struct fib* cmd_fibcontext;
-    struct aac_dev* dev;
-    int status;
-    struct aac_srb *srbcmd;
-    u16 fibsize;
-    u32 flag;
-
-    if( scsicmd->target > 15 || scsicmd->lun > 7) {
-       scsicmd->result = DID_NO_CONNECT << 16;
-       __aac_io_done(scsicmd);
-       return 0;
-    }
-
-    dev = (struct aac_dev *)scsicmd->host->hostdata;
-    switch(scsicmd->sc_data_direction){
-    case SCSI_DATA_WRITE:
-       flag = SRB_DataOut;
-       break;
-    case SCSI_DATA_UNKNOWN:  
-       flag = SRB_DataIn | SRB_DataOut;
-       break;
-    case SCSI_DATA_READ:
-       flag = SRB_DataIn;
-       break;
-    case SCSI_DATA_NONE: 
-    default:
-       flag = SRB_NoDataXfer;
-       break;
-    }
-
-
-    /*
-     * Allocate and initialize a Fib then setup a BlockWrite command
-     */
-    if (!(cmd_fibcontext = fib_alloc(dev))) {
-       scsicmd->result = DID_ERROR << 16;
-       __aac_io_done(scsicmd);
-       return -1;
-    }
-    fib_init(cmd_fibcontext);
-
-    srbcmd = (struct aac_srb*) fib_data(cmd_fibcontext);
-    srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
-    srbcmd->channel  = cpu_to_le32(aac_logical_to_phys(scsicmd->channel));
-    srbcmd->target   = cpu_to_le32(scsicmd->target);
-    srbcmd->lun      = cpu_to_le32(scsicmd->lun);
-    srbcmd->flags    = cpu_to_le32(flag);
-    srbcmd->timeout  = cpu_to_le32(0);  // timeout not used
-    srbcmd->retry_limit =cpu_to_le32(0); // Obsolete parameter
-    srbcmd->cdb_size = cpu_to_le32(scsicmd->cmd_len);
-       
-    if( dev->pae_support ==1 ) {
-       aac_build_sg64(scsicmd, (struct sgmap64*) &srbcmd->sg);
-       srbcmd->count = cpu_to_le32(scsicmd->request_bufflen);
+       struct fib* cmd_fibcontext;
+       struct aac_dev* dev;
+       int status;
+       struct aac_srb *srbcmd;
+       u16 fibsize;
+       u32 flag;
+       u32 timeout;
+
+       if( scsicmd->target > 15 || scsicmd->lun > 7) {
+               scsicmd->result = DID_NO_CONNECT << 16;
+               __aac_io_done(scsicmd);
+               return 0;
+       }
+
+       dev = (struct aac_dev *)scsicmd->host->hostdata;
+       switch(scsicmd->sc_data_direction){
+       case SCSI_DATA_WRITE:
+               flag = SRB_DataOut;
+               break;
+       case SCSI_DATA_UNKNOWN:  
+               flag = SRB_DataIn | SRB_DataOut;
+               break;
+       case SCSI_DATA_READ:
+               flag = SRB_DataIn;
+               break;
+       case SCSI_DATA_NONE: 
+       default:
+               flag = SRB_NoDataXfer;
+               break;
+       }
 
-       memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
-       memcpy(srbcmd->cdb, scsicmd->cmnd, scsicmd->cmd_len);
-       /*
-        *      Build Scatter/Gather list
-        */
-       fibsize = sizeof (struct aac_srb) + (((srbcmd->sg.count & 0xff) - 1) 
-                                            * sizeof (struct sgentry64));
 
        /*
-        *      Now send the Fib to the adapter
+        *      Allocate and initialize a Fib then setup a SRB command
         */
-       status = fib_send(ScsiPortCommand64, cmd_fibcontext, fibsize, 
-                         FsaNormal, 0, 1, (fib_callback) aac_srb_callback, 
-                         (void *) scsicmd);
-    } else {
-       aac_build_sg(scsicmd, (struct sgmap*)&srbcmd->sg);
-       srbcmd->count = cpu_to_le32(scsicmd->request_bufflen);
+       if (!(cmd_fibcontext = fib_alloc(dev))) {
+               scsicmd->result = DID_ERROR << 16;
+               __aac_io_done(scsicmd);
+               return -1;
+       }
+       fib_init(cmd_fibcontext);
+
+       srbcmd = (struct aac_srb*) fib_data(cmd_fibcontext);
+       srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
+       srbcmd->channel  = cpu_to_le32(aac_logical_to_phys(scsicmd->channel));
+       srbcmd->target   = cpu_to_le32(scsicmd->target);
+       srbcmd->lun      = cpu_to_le32(scsicmd->lun);
+       srbcmd->flags    = cpu_to_le32(flag);
+       timeout = (scsicmd->timeout-jiffies)/HZ;
+       if(timeout == 0){
+               timeout = 1;
+       }
+       srbcmd->timeout  = cpu_to_le32(timeout);  // timeout in seconds
+       srbcmd->retry_limit =cpu_to_le32(0); // Obsolete parameter
+       srbcmd->cdb_size = cpu_to_le32(scsicmd->cmd_len);
+       
+       if( dev->pae_support ==1 ) {
+               aac_build_sg64(scsicmd, (struct sgmap64*) &srbcmd->sg);
+               srbcmd->count = cpu_to_le32(scsicmd->request_bufflen);
+
+               memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
+               memcpy(srbcmd->cdb, scsicmd->cmnd, scsicmd->cmd_len);
+               /*
+                *      Build Scatter/Gather list
+                */
+               fibsize = sizeof (struct aac_srb) + (((srbcmd->sg.count & 0xff) - 1) * sizeof (struct sgentry64));
+
+               /*
+                *      Now send the Fib to the adapter
+                */
+               status = fib_send(ScsiPortCommand64, cmd_fibcontext, fibsize, FsaNormal, 0, 1,
+                                 (fib_callback) aac_srb_callback, (void *) scsicmd);
+       } else {
+               aac_build_sg(scsicmd, (struct sgmap*)&srbcmd->sg);
+               srbcmd->count = cpu_to_le32(scsicmd->request_bufflen);
+
+               memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
+               memcpy(srbcmd->cdb, scsicmd->cmnd, scsicmd->cmd_len);
+               /*
+                *      Build Scatter/Gather list
+                */
+               fibsize = sizeof (struct aac_srb) + (((srbcmd->sg.count & 0xff) - 1) * sizeof (struct sgentry));
 
-       memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
-       memcpy(srbcmd->cdb, scsicmd->cmnd, scsicmd->cmd_len);
+               /*
+                *      Now send the Fib to the adapter
+                */
+               status = fib_send(ScsiPortCommand, cmd_fibcontext, fibsize, FsaNormal, 0, 1,
+                                 (fib_callback) aac_srb_callback, (void *) scsicmd);
+       }
        /*
-        *      Build Scatter/Gather list
+        *      Check that the command queued to the controller
         */
-       fibsize = sizeof (struct aac_srb) + (((srbcmd->sg.count & 0xff) - 1) 
-                                            * sizeof (struct sgentry));
+       if (status == -EINPROGRESS){
+               return 0;
+       }
 
+       printk(KERN_WARNING "aac_srb: fib_send failed with status: %d\n", status);
        /*
-        *      Now send the Fib to the adapter
+        *      For some reason, the Fib didn't queue, return QUEUE_FULL
         */
-       status = fib_send(ScsiPortCommand, cmd_fibcontext, fibsize, 
-                         FsaNormal, 0, 1, (fib_callback) aac_srb_callback, 
-                         (void *) scsicmd);
-    }
-    /*
-     * Check that the command queued to the controller
-     */
-    if (status == -EINPROGRESS){
-       return 0;
-    }
-
-    printk(KERN_WARNING "aac_srb: fib_send failed with status: %d\n", status);
-    /*
-     * For some reason, the Fib didn't queue, return QUEUE_FULL
-     */
-    scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | QUEUE_FULL;
-    __aac_io_done(scsicmd);
+       scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | QUEUE_FULL;
+       __aac_io_done(scsicmd);
 
-    fib_complete(cmd_fibcontext);
-    fib_free(cmd_fibcontext);
+       fib_complete(cmd_fibcontext);
+       fib_free(cmd_fibcontext);
 
-    return -1;
+       return -1;
 }
 
 static unsigned long aac_build_sg(Scsi_Cmnd* scsicmd, struct sgmap* psg)
 {
-    struct aac_dev *dev;
-    unsigned long byte_count = 0;
-
-    dev = (struct aac_dev *)scsicmd->host->hostdata;
-    // Get rid of old data
-    psg->count = cpu_to_le32(0);
-    psg->sg[0].addr = cpu_to_le32(NULL);
-    psg->sg[0].count = cpu_to_le32(0);  
-    if (scsicmd->use_sg) {
-       struct scatterlist *sg;
-       int i;
-       int sg_count;
-       sg = (struct scatterlist *) scsicmd->request_buffer;
+       struct aac_dev *dev;
+       unsigned long byte_count = 0;
 
-       sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
-                             scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
-       psg->count = cpu_to_le32(sg_count);
+       dev = (struct aac_dev *)scsicmd->host->hostdata;
+       // Get rid of old data
+       psg->count = cpu_to_le32(0);
+       psg->sg[0].addr = cpu_to_le32(NULL);
+       psg->sg[0].count = cpu_to_le32(0);  
+       if (scsicmd->use_sg) {
+               struct scatterlist *sg;
+               int i;
+               int sg_count;
+               sg = (struct scatterlist *) scsicmd->request_buffer;
+
+               sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
+                       scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
+               psg->count = cpu_to_le32(sg_count);
 
-       byte_count = 0;
+               byte_count = 0;
 
-       for (i = 0; i < sg_count; i++) {
-           psg->sg[i].addr = cpu_to_le32(sg_dma_address(sg));
-           psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
-           byte_count += sg_dma_len(sg);
-           sg++;
-       }
-       /* hba wants the size to be exact */
-       if(byte_count > scsicmd->request_bufflen){
-           psg->sg[i-1].count -= (byte_count - scsicmd->request_bufflen);
-           byte_count = scsicmd->request_bufflen;
+               for (i = 0; i < sg_count; i++) {
+                       psg->sg[i].addr = cpu_to_le32(sg_dma_address(sg));
+                       psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
+                       byte_count += sg_dma_len(sg);
+                       sg++;
+               }
+               /* hba wants the size to be exact */
+               if(byte_count > scsicmd->request_bufflen){
+                       psg->sg[i-1].count -= (byte_count - scsicmd->request_bufflen);
+                       byte_count = scsicmd->request_bufflen;
+               }
+               /* Check for command underflow */
+               if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
+                       printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
+                                       byte_count, scsicmd->underflow);
+               }
        }
-       /* Check for command underflow */
-       if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
-           printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
-                  byte_count, scsicmd->underflow);
+       else if(scsicmd->request_bufflen) {
+               dma_addr_t addr; 
+               addr = pci_map_single(dev->pdev,
+                               scsicmd->request_buffer,
+                               scsicmd->request_bufflen,
+                               scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
+               psg->count = cpu_to_le32(1);
+               psg->sg[0].addr = cpu_to_le32(addr);
+               psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);  
+               /* Cast to pointer from integer of different size */
+               scsicmd->SCp.ptr = (void *)addr;
+               byte_count = scsicmd->request_bufflen;
        }
-    }
-    else if(scsicmd->request_bufflen) {
-       dma_addr_t addr; 
-       addr = pci_map_single(dev->pdev,
-                             scsicmd->request_buffer,
-                             scsicmd->request_bufflen,
-                             scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
-       psg->count = cpu_to_le32(1);
-       psg->sg[0].addr = cpu_to_le32(addr);
-       psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);  
-       scsicmd->SCp.ptr = (void *)addr;
-       byte_count = scsicmd->request_bufflen;
-    }
-    return byte_count;
+       return byte_count;
 }
 
 
 static unsigned long aac_build_sg64(Scsi_Cmnd* scsicmd, struct sgmap64* psg)
 {
-    struct aac_dev *dev;
-    unsigned long byte_count = 0;
-    u64 le_addr;
-
-    dev = (struct aac_dev *)scsicmd->host->hostdata;
-    // Get rid of old data
-    psg->count = cpu_to_le32(0);
-    psg->sg[0].addr[0] = cpu_to_le32(NULL);
-    psg->sg[0].addr[1] = cpu_to_le32(NULL);
-    psg->sg[0].count = cpu_to_le32(0);  
-    if (scsicmd->use_sg) {
-       struct scatterlist *sg;
-       int i;
-       int sg_count;
-       sg = (struct scatterlist *) scsicmd->request_buffer;
-
-       sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
-                             scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
-       psg->count = cpu_to_le32(sg_count);
-
-       byte_count = 0;
-
-       for (i = 0; i < sg_count; i++) {
-           le_addr = cpu_to_le64(sg_dma_address(sg));
-           psg->sg[i].addr[1] = (u32)(le_addr>>32);
-           psg->sg[i].addr[0] = (u32)(le_addr & 0xffffffff);
-           psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
-           byte_count += sg_dma_len(sg);
-           sg++;
-       }
-       /* hba wants the size to be exact */
-       if(byte_count > scsicmd->request_bufflen){
-           psg->sg[i-1].count -= (byte_count - scsicmd->request_bufflen);
-           byte_count = scsicmd->request_bufflen;
+       struct aac_dev *dev;
+       unsigned long byte_count = 0;
+       u64 le_addr;
+
+       dev = (struct aac_dev *)scsicmd->host->hostdata;
+       // Get rid of old data
+       psg->count = cpu_to_le32(0);
+       psg->sg[0].addr[0] = cpu_to_le32(NULL);
+       psg->sg[0].addr[1] = cpu_to_le32(NULL);
+       psg->sg[0].count = cpu_to_le32(0);  
+       if (scsicmd->use_sg) {
+               struct scatterlist *sg;
+               int i;
+               int sg_count;
+               sg = (struct scatterlist *) scsicmd->request_buffer;
+
+               sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
+                       scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
+               psg->count = cpu_to_le32(sg_count);
+
+               byte_count = 0;
+
+               for (i = 0; i < sg_count; i++) {
+                       le_addr = cpu_to_le64(sg_dma_address(sg));
+                       psg->sg[i].addr[1] = (u32)(le_addr>>32);
+                       psg->sg[i].addr[0] = (u32)(le_addr & 0xffffffff);
+                       psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
+                       byte_count += sg_dma_len(sg);
+                       sg++;
+               }
+               /* hba wants the size to be exact */
+               if(byte_count > scsicmd->request_bufflen){
+                       psg->sg[i-1].count -= (byte_count - scsicmd->request_bufflen);
+                       byte_count = scsicmd->request_bufflen;
+               }
+               /* Check for command underflow */
+               if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
+                       printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
+                                       byte_count, scsicmd->underflow);
+               }
        }
-       /* Check for command underflow */
-       if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
-           printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
-                  byte_count, scsicmd->underflow);
+       else if(scsicmd->request_bufflen) {
+               dma_addr_t addr; 
+               addr = pci_map_single(dev->pdev,
+                               scsicmd->request_buffer,
+                               scsicmd->request_bufflen,
+                               scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
+               psg->count = cpu_to_le32(1);
+               le_addr = cpu_to_le64(addr);
+               psg->sg[0].addr[1] = (u32)(le_addr>>32);
+               psg->sg[0].addr[0] = (u32)(le_addr & 0xffffffff);
+               psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);  
+               /* Cast to pointer from integer of different size */
+               scsicmd->SCp.ptr = (void *)addr;
+               byte_count = scsicmd->request_bufflen;
        }
-    }
-    else if(scsicmd->request_bufflen) {
-       dma_addr_t addr; 
-       addr = pci_map_single(dev->pdev,
-                             scsicmd->request_buffer,
-                             scsicmd->request_bufflen,
-                             scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
-       psg->count = cpu_to_le32(1);
-       le_addr = cpu_to_le64(addr);
-       psg->sg[0].addr[1] = (u32)(le_addr>>32);
-       psg->sg[0].addr[0] = (u32)(le_addr & 0xffffffff);
-       psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);  
-       scsicmd->SCp.ptr = (void *)addr;
-       byte_count = scsicmd->request_bufflen;
-    }
-    return byte_count;
+       return byte_count;
 }
 
 #ifdef AAC_DETAILED_STATUS_INFO
index ce6566bcc17b629479dbe53cb8bbbdb80967be8a..eb38cd1361e0baa1859478e86b95f48eb6ea8af2 100644 (file)
@@ -1,16 +1,13 @@
+//#define dprintk(x) printk x
+#if (!defined(dprintk))
+# define dprintk(x)
+#endif
 
-/* #define dprintk(x) */
-// #define dprintk(x) printk x
-#define dprintk(x)
-
-
+/* Start of Xen additions XXX */
 #include <asm/byteorder.h>
-
+#include <xeno/interrupt.h>
 #define TRY_TASKLET
-#ifdef TRY_TASKLET
-/* XXX SMH: trying to use softirqs to trigger stuff done prev by threads */
-#include <xeno/interrupt.h>  /* for tasklet/softirq stuff */
-#endif
+/* End of Xen additions XXX */
 
 /*------------------------------------------------------------------------------
  *              D E F I N E S
 #define MAXIMUM_NUM_CONTAINERS 31
 #define MAXIMUM_NUM_ADAPTERS   8
 
-#define AAC_NUM_FIB    578
-#define AAC_NUM_IO_FIB 512
+#define AAC_NUM_FIB            578
+//#define AAC_NUM_IO_FIB       512
+#define AAC_NUM_IO_FIB         100
 
-#define AAC_MAX_TARGET (MAXIMUM_NUM_CONTAINERS+1)
+#define AAC_MAX_TARGET         (MAXIMUM_NUM_CONTAINERS+1)
 //#define AAC_MAX_TARGET       (16)
-#define AAC_MAX_LUN    (8)
+#define AAC_MAX_LUN            (8)
 
 /*
  * These macros convert from physical channels to virtual channels
@@ -266,27 +264,25 @@ enum aac_queue_types {
  */
 
 struct aac_fibhdr {
-    u32 XferState;             // Current transfer state for this CCB
-    u16 Command;               // Routing information for the destination
-    u8 StructType;             // Type FIB
-    u8 Flags;                  // Flags for FIB
-    u16 Size;                  // Size of this FIB in bytes
-    u16 SenderSize;            // Size of the FIB in the sender (for 
-                                // response sizing)
-    u32 SenderFibAddress;      // Host defined data in the FIB
-    u32 ReceiverFibAddress;    // Logical address of this FIB for the adapter
-    u32 SenderData;            // Place holder for the sender to store data
-    union {
-       struct {
-           u32 _ReceiverTimeStart;  // Timestamp for receipt of fib
-           u32 _ReceiverTimeDone;   // Timestamp for completion of fib
-       } _s;
-       struct list_head _FibLinks;  // Used to link Adapter Initiated 
-                                    // Fibs on the host
-    } _u;
+       u32 XferState;                  // Current transfer state for this CCB
+       u16 Command;                    // Routing information for the destination
+       u8 StructType;                  // Type FIB
+       u8 Flags;                       // Flags for FIB
+       u16 Size;                       // Size of this FIB in bytes
+       u16 SenderSize;                 // Size of the FIB in the sender (for response sizing)
+       u32 SenderFibAddress;           // Host defined data in the FIB
+       u32 ReceiverFibAddress;         // Logical address of this FIB for the adapter
+       u32 SenderData;                 // Place holder for the sender to store data
+       union {
+               struct {
+                   u32 _ReceiverTimeStart;     // Timestamp for receipt of fib
+                   u32 _ReceiverTimeDone;      // Timestamp for completion of fib
+               } _s;
+//             struct aac_list_head _FibLinks; // Used to link Adapter Initiated Fibs on the host
+       } _u;
 };
 
-#define FibLinks                       _u._FibLinks
+//#define FibLinks                     _u._FibLinks
 
 #define FIB_DATA_SIZE_IN_BYTES (512 - sizeof(struct aac_fibhdr))
 
@@ -451,6 +447,8 @@ struct aac_driver_ident
        char *  vname;
        char *  model;
        u16     channels;
+       int     quirks;
+#define AAC_QUIRK_31BIT                        1
 };
 
 /*
@@ -471,8 +469,7 @@ struct aac_queue {
 #if 0
        wait_queue_head_t       qfull;                  /* Event to wait on if the queue is full */
        wait_queue_head_t       cmdready;               /* Indicates there is a Command ready from the adapter on this queue. */
-#endif
-                                                       /* This is only valid for adapter to host command queues. */                      
+#endif                                                 /* This is only valid for adapter to host command queues. */                      
        spinlock_t              *lock;                  /* Spinlock for this queue must take this lock before accessing the lock */
        spinlock_t              lockdata;               /* Actual lock (used only on one side of the lock) */
        unsigned long           SavedIrql;              /* Previous IRQL when the spin lock is taken */
@@ -646,7 +643,7 @@ struct aac_fib_context {
 #endif
        int                     wait;           // Set to true when thread is in WaitForSingleObject
        unsigned long           count;          // total number of FIBs on FibList
-       struct list_head        fibs;
+       struct list_head        fib_list;       // this holds fibs which should be 32 bit addresses
 };
 
 struct fsa_scsi_hba {
@@ -656,7 +653,7 @@ struct fsa_scsi_hba {
        u8              ro[MAXIMUM_NUM_CONTAINERS];
        u8              locked[MAXIMUM_NUM_CONTAINERS];
        u8              deleted[MAXIMUM_NUM_CONTAINERS];
-       u32             devno[MAXIMUM_NUM_CONTAINERS];
+       s32             devno[MAXIMUM_NUM_CONTAINERS];
 };
 
 struct fib {
@@ -667,7 +664,6 @@ struct fib {
         *      The Adapter that this I/O is destined for.
         */
        struct aac_dev          *dev;
-       u64                     logicaladdr;    /* 64 bit */
 #if 0
        /*
         *      This is the event the sendfib routine will wait on if the
@@ -686,9 +682,14 @@ struct fib {
         *      Outstanding I/O queue.
         */
        struct list_head        queue;
-
+       /*
+        *      And for the internal issue/reply queues (we may be able
+        *      to merge these two)
+        */
+       struct list_head        fiblink;
        void                    *data;
-       struct hw_fib           *fib;           /* Actual shared object */
+       struct hw_fib           *hw_fib;                /* Actual shared object */
+       dma_addr_t              hw_fib_pa;              /* physical address of hw_fib*/
 };
 
 /*
@@ -715,6 +716,7 @@ struct aac_adapter_info
        u32     biosrev;
        u32     biosbuild;
        u32     cluster;
+       u32     clusterchannelmask; 
        u32     serial[2];
        u32     battery;
        u32     options;
@@ -739,19 +741,22 @@ struct aac_adapter_info
 /*
  * Supported Options
  */
-#define AAC_OPT_SNAPSHOT       cpu_to_le32(1)
-#define AAC_OPT_CLUSTERS       cpu_to_le32(1<<1)
-#define AAC_OPT_WRITE_CACHE    cpu_to_le32(1<<2)
-#define AAC_OPT_64BIT_DATA     cpu_to_le32(1<<3)
-#define AAC_OPT_HOST_TIME_FIB  cpu_to_le32(1<<4)
-#define AAC_OPT_RAID50         cpu_to_le32(1<<5)
-#define AAC_OPT_4GB_WINDOW     cpu_to_le32(1<<6)
-#define AAC_OPT_SCSI_UPGRADEABLE cpu_to_le32(1<<7)
-#define AAC_OPT_SOFT_ERR_REPORT        cpu_to_le32(1<<8)
-#define AAC_OPT_SUPPORTED_RECONDITION cpu_to_le32(1<<9)
-#define AAC_OPT_SGMAP_HOST64   cpu_to_le32(1<<10)
-#define AAC_OPT_ALARM          cpu_to_le32(1<<11)
-#define AAC_OPT_NONDASD                cpu_to_le32(1<<12)
+#define AAC_OPT_SNAPSHOT               cpu_to_le32(1)
+#define AAC_OPT_CLUSTERS               cpu_to_le32(1<<1)
+#define AAC_OPT_WRITE_CACHE            cpu_to_le32(1<<2)
+#define AAC_OPT_64BIT_DATA             cpu_to_le32(1<<3)
+#define AAC_OPT_HOST_TIME_FIB          cpu_to_le32(1<<4)
+#define AAC_OPT_RAID50                 cpu_to_le32(1<<5)
+#define AAC_OPT_4GB_WINDOW             cpu_to_le32(1<<6)
+#define AAC_OPT_SCSI_UPGRADEABLE       cpu_to_le32(1<<7)
+#define AAC_OPT_SOFT_ERR_REPORT                cpu_to_le32(1<<8)
+#define AAC_OPT_SUPPORTED_RECONDITION  cpu_to_le32(1<<9)
+#define AAC_OPT_SGMAP_HOST64           cpu_to_le32(1<<10)
+#define AAC_OPT_ALARM                  cpu_to_le32(1<<11)
+#define AAC_OPT_NONDASD                        cpu_to_le32(1<<12)
+#define AAC_OPT_SCSI_MANAGED           cpu_to_le32(1<<13)
+#define AAC_OPT_RAID_SCSI_MODE         cpu_to_le32(1<<14)
+#define AAC_OPT_SUPPLEMENT_ADAPTER_INFO        cpu_to_le32(1<<15)
 
 struct aac_dev
 {
@@ -765,13 +770,12 @@ struct aac_dev
         */     
        dma_addr_t              hw_fib_pa;
        struct hw_fib           *hw_fib_va;
-#if BITS_PER_LONG >= 64
        ulong                   fib_base_va;
-#endif
        /*
         *      Fib Headers
         */
-       struct fib              fibs[AAC_NUM_FIB];
+       struct fib              *fibs;
+
        struct fib              *free_fib;
        struct fib              *timeout_fib;
        spinlock_t              fib_lock;
@@ -800,7 +804,9 @@ struct aac_dev
 
        struct Scsi_Host        *scsi_host_ptr;
        struct fsa_scsi_hba     fsa_dev;
-       int                     thread_pid;
+#if 0
+       pid_t                   thread_pid;
+#endif
        int                     cardtype;
        
        /*
@@ -825,8 +831,15 @@ struct aac_dev
         */
        u8                      nondasd_support; 
        u8                      pae_support;
+       u8                      raid_scsi_mode;
 };
 
+#define AllocateAndMapFibSpace(dev, MapFibContext) \
+       dev->a_ops.AllocateAndMapFibSpace(dev, MapFibContext)
+
+#define UnmapAndFreeFibSpace(dev, MapFibContext) \
+       dev->a_ops.UnmapAndFreeFibSpace(dev, MapFibContext)
+
 #define aac_adapter_interrupt(dev) \
        dev->a_ops.adapter_interrupt(dev)
 
@@ -1163,7 +1176,9 @@ struct aac_mntent {
        u32                     altoid;         // != oid <==> snapshot or broken mirror exists
 };
 
-#define FSCS_READONLY  0x0002  /*      possible result of broken mirror */
+#define FSCS_NOTCLEAN  0x0001          /* fsck is necessary before mounting */
+#define FSCS_READONLY  0x0002          /* possible result of broken mirror */
+#define FSCS_HIDDEN    0x0004          /* should be ignored - set during a clear */
 
 struct aac_query_mount {
        u32             command;
@@ -1347,9 +1362,12 @@ extern struct aac_common aac_config;
  */
  
 #define        AifCmdEventNotify       1       /* Notify of event */
+#define                AifEnContainerChange    4       /* Container configuration change */
 #define                AifCmdJobProgress       2       /* Progress report */
 #define                AifCmdAPIReport         3       /* Report from other user of API */
 #define                AifCmdDriverNotify      4       /* Notify host driver of event */
+#define                AifDenMorphComplete     200     /* A morph operation completed */
+#define                AifDenVolumeExtendComplete 201  /* A volume expand operation completed */
 #define                AifReqJobList           100     /* Gets back complete job list */
 #define                AifReqJobsForCtr        101     /* Gets back jobs for specific container */
 #define                AifReqJobsForScsi       102     /* Gets back jobs for specific SCSI device */ 
@@ -1374,16 +1392,6 @@ struct aac_aifcmd {
        u8 data[1];             /* Undefined length (from kernel viewpoint) */
 };
 
-static inline u32 fib2addr(struct hw_fib *hw)
-{
-       return (u32)hw;
-}
-
-static inline struct hw_fib *addr2fib(u32 addr)
-{
-       return (struct hw_fib *)addr;
-}
-
 const char *aac_driverinfo(struct Scsi_Host *);
 struct fib *fib_alloc(struct aac_dev *dev);
 int fib_setup(struct aac_dev *dev);
@@ -1397,7 +1405,7 @@ int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entr
 int aac_consumer_avail(struct aac_dev * dev, struct aac_queue * q);
 void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum);
 int fib_complete(struct fib * context);
-#define fib_data(fibctx) ((void *)(fibctx)->fib->data)
+#define fib_data(fibctx) ((void *)(fibctx)->hw_fib->data)
 int aac_detach(struct aac_dev *dev);
 struct aac_dev *aac_init_adapter(struct aac_dev *dev);
 int aac_get_containers(struct aac_dev *dev);
index 15b6a62c6f0f98e651c0aac7cc6565ad1f225ee3..2169d34f43c3ee819d3b834f067bb113bf7204b1 100644 (file)
  *
  */
 
-#include <xeno/config.h>
-/*  #include <xeno/kernel.h> */
-#include <xeno/init.h>
-#include <xeno/types.h>
-#include <xeno/sched.h>
-#include <xeno/pci.h>
-/*  #include <xeno/spinlock.h> */
-/*  #include <xeno/slab.h> */
-/*  #include <xeno/completion.h> */
-#include <xeno/blk.h>
-/*  #include <asm/semaphore.h> */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+/*#include <linux/completion.h>*/
+#include <linux/blk.h>
+/*#include <asm/semaphore.h>*/
 #include <asm/uaccess.h>
 #include "scsi.h"
 #include "hosts.h"
@@ -63,7 +63,7 @@ static int ioctl_send_fib(struct aac_dev * dev, void *arg)
        if(fibptr == NULL)
                return -ENOMEM;
                
-       kfib = fibptr->fib;
+       kfib = fibptr->hw_fib;
        /*
         *      First copy in the header so that we can check the size field.
         */
@@ -152,7 +152,7 @@ static int open_getadapter_fib(struct aac_dev * dev, void *arg)
                 *      the list to 0.
                 */
                fibctx->count = 0;
-               INIT_LIST_HEAD(&fibctx->fibs);
+               INIT_LIST_HEAD(&fibctx->fib_list);
                fibctx->jiffies = jiffies/HZ;
                /*
                 *      Now add this context onto the adapter's 
@@ -183,7 +183,7 @@ static int next_getadapter_fib(struct aac_dev * dev, void *arg)
 {
        struct fib_ioctl f;
        struct aac_fib_context *fibctx, *aifcp;
-       struct hw_fib * fib;
+       struct fib * fib;
        int status;
        struct list_head * entry;
        int found;
@@ -213,12 +213,16 @@ static int next_getadapter_fib(struct aac_dev * dev, void *arg)
                }
                entry = entry->next;
        }
-       if (found == 0)
+       if (found == 0) {
+               dprintk ((KERN_INFO "Fib not found\n"));
                return -EINVAL;
+       }
 
        if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
-                (fibctx->size != sizeof(struct aac_fib_context)))
+                (fibctx->size != sizeof(struct aac_fib_context))) {
+               dprintk ((KERN_INFO "Fib Context corrupt?\n"));
                return -EINVAL;
+       }
        status = 0;
        spin_lock_irqsave(&dev->fib_lock, flags);
        /*
@@ -226,27 +230,28 @@ static int next_getadapter_fib(struct aac_dev * dev, void *arg)
         *      -EAGAIN
         */
 return_fib:
-       if (!list_empty(&fibctx->fibs)) {
+       if (!list_empty(&fibctx->fib_list)) {
                struct list_head * entry;
                /*
                 *      Pull the next fib from the fibs
                 */
-               entry = fibctx->fibs.next;
+               entry = fibctx->fib_list.next;
                list_del(entry);
                
-               fib = list_entry(entry, struct hw_fib, header.FibLinks);
+               fib = list_entry(entry, struct fib, fiblink);
                fibctx->count--;
                spin_unlock_irqrestore(&dev->fib_lock, flags);
-               if (copy_to_user(f.fib, fib, sizeof(struct hw_fib))) {
+               if (copy_to_user(f.fib, fib->hw_fib, sizeof(struct hw_fib))) {
+                       kfree(fib->hw_fib);
                        kfree(fib);
                        return -EFAULT;
                }       
                /*
                 *      Free the space occupied by this copy of the fib.
                 */
+               kfree(fib->hw_fib);
                kfree(fib);
                status = 0;
-               fibctx->jiffies = jiffies/HZ;
        } else {
                spin_unlock_irqrestore(&dev->fib_lock, flags);
                if (f.wait) {
@@ -255,7 +260,7 @@ return_fib:
                                status = -EINTR;
                        } else {
 #else
-                           {
+                       {
 #endif
                                /* Lock again and retry */
                                spin_lock_irqsave(&dev->fib_lock, flags);
@@ -265,28 +270,30 @@ return_fib:
                        status = -EAGAIN;
                }       
        }
+       fibctx->jiffies = jiffies/HZ;
        return status;
 }
 
 int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx)
 {
-       struct hw_fib *fib;
+       struct fib *fib;
 
        /*
         *      First free any FIBs that have not been consumed.
         */
-       while (!list_empty(&fibctx->fibs)) {
+       while (!list_empty(&fibctx->fib_list)) {
                struct list_head * entry;
                /*
                 *      Pull the next fib from the fibs
                 */
-               entry = fibctx->fibs.next;
+               entry = fibctx->fib_list.next;
                list_del(entry);
-               fib = list_entry(entry, struct hw_fib, header.FibLinks);
+               fib = list_entry(entry, struct fib, fiblink);
                fibctx->count--;
                /*
                 *      Free the space occupied by this copy of the fib.
                 */
+               kfree(fib->hw_fib);
                kfree(fib);
        }
        /*
index b4681a473b47578ebee3e2a4dd9b81e3b66d2931..531361e55de17b9a5314dd31721b5aae6e4cefc3 100644 (file)
  *
  */
 
-#include <xeno/config.h>
-/* #include <xeno/kernel.h> */
-#include <xeno/init.h>
-#include <xeno/types.h>
-#include <xeno/sched.h>
-#include <xeno/pci.h>
-#include <xeno/spinlock.h>
-/* #include <xeno/slab.h> */
-#include <xeno/blk.h>
-/* #include <xeno/completion.h> */
-/* #include <asm/semaphore.h> */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/blk.h>
+/*#include <linux/completion.h>*/
+#include <linux/mm.h>
+/*#include <asm/semaphore.h>*/
 #include "scsi.h"
 #include "hosts.h"
 
@@ -58,7 +59,6 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
        struct aac_init *init;
        dma_addr_t phys;
 
-       /* FIXME: Adaptec add 128 bytes to this value - WHY ?? */
        size = fibsize + sizeof(struct aac_init) + commsize + commalign + printfbufsiz;
 
        base = pci_alloc_consistent(dev->pdev, size, &phys);
@@ -74,14 +74,6 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
        dev->init = (struct aac_init *)(base + fibsize);
        dev->init_pa = phys + fibsize;
 
-       /*
-        *      Cache the upper bits of the virtual mapping for 64bit boxes
-        *      FIXME: this crap should be rewritten
-        */
-#if BITS_PER_LONG >= 64 
-       dev->fib_base_va = ((ulong)base & 0xffffffff00000000);
-#endif
-
        init = dev->init;
 
        init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION);
@@ -92,16 +84,20 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
         *      Adapter Fibs are the first thing allocated so that they
         *      start page aligned
         */
-       init->AdapterFibsVirtualAddress = cpu_to_le32((u32)base);
-       init->AdapterFibsPhysicalAddress = cpu_to_le32(phys);
+       dev->fib_base_va = (ulong)base;
+
+       /* We submit the physical address for AIF tags to limit to 32 bits */
+       init->AdapterFibsVirtualAddress = cpu_to_le32((u32)phys);
+       init->AdapterFibsPhysicalAddress = cpu_to_le32((u32)phys);
        init->AdapterFibsSize = cpu_to_le32(fibsize);
        init->AdapterFibAlign = cpu_to_le32(sizeof(struct hw_fib));
+       init->HostPhysMemPages = cpu_to_le32(4096);             // number of 4k pages of host physical memory
 
        /*
         * Increment the base address by the amount already used
         */
        base = base + fibsize + sizeof(struct aac_init);
-       phys = phys + fibsize + sizeof(struct aac_init);
+       phys = (dma_addr_t)((ulong)phys + fibsize + sizeof(struct aac_init));
        /*
         *      Align the beginning of Headers to commalign
         */
@@ -111,8 +107,8 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
        /*
         *      Fill in addresses of the Comm Area Headers and Queues
         */
-       *commaddr = (unsigned long *)base;
-       init->CommHeaderAddress = cpu_to_le32(phys);
+       *commaddr = base;
+       init->CommHeaderAddress = cpu_to_le32((u32)phys);
        /*
         *      Increment the base address by the size of the CommArea
         */
@@ -144,8 +140,8 @@ static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem,
        q->lock = &q->lockdata;
        q->headers.producer = mem;
        q->headers.consumer = mem+1;
-       *q->headers.producer = cpu_to_le32(qsize);
-       *q->headers.consumer = cpu_to_le32(qsize);
+       *(q->headers.producer) = cpu_to_le32(qsize);
+       *(q->headers.consumer) = cpu_to_le32(qsize);
        q->entries = qsize;
 }
 
@@ -250,9 +246,9 @@ int aac_comm_init(struct aac_dev * dev)
        if (!aac_alloc_comm(dev, (void * *)&headers, size, QUEUE_ALIGNMENT))
                return -ENOMEM;
 
-       queues = (struct aac_entry *)((unsigned char *)headers + hdrsize);
+       queues = (struct aac_entry *)(((ulong)headers) + hdrsize);
 
-       /* Adapter to Host normal proirity Command queue */ 
+       /* Adapter to Host normal priority Command queue */ 
        comm->queue[HostNormCmdQueue].base = queues;
        aac_queue_init(dev, &comm->queue[HostNormCmdQueue], headers, HOST_NORM_CMD_ENTRIES);
        queues += HOST_NORM_CMD_ENTRIES;
@@ -317,23 +313,25 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
        /*
         *      Ok now init the communication subsystem
         */
-       dev->queues = (struct aac_queue_block *) 
-           kmalloc(sizeof(struct aac_queue_block), GFP_KERNEL);
+       dev->queues = (struct aac_queue_block *) kmalloc(sizeof(struct aac_queue_block), GFP_KERNEL);
        if (dev->queues == NULL) {
                printk(KERN_ERR "Error could not allocate comm region.\n");
                return NULL;
        }
        memset(dev->queues, 0, sizeof(struct aac_queue_block));
 
-       if (aac_comm_init(dev)<0)
+       if (aac_comm_init(dev)<0){
+               kfree(dev->queues);
                return NULL;
-
+       }
        /*
         *      Initialize the list of fibs
         */
-       if(fib_setup(dev)<0)
-           return NULL;
-               
+       if(fib_setup(dev)<0){
+               kfree(dev->queues);
+               return NULL;
+       }
+
        INIT_LIST_HEAD(&dev->fib_list);
 #if 0
        init_completion(&dev->aif_completion);
index a1fabe7b19aa1707ba41d187f413160c1f360d1a..84c77464f5a5325458562ced8eaa25812d8f8aea 100644 (file)
  *
  */
 
-#include <xeno/config.h>
-/* #include <xeno/kernel.h> */
-#include <xeno/init.h>
-#include <xeno/types.h>
-#include <xeno/sched.h>
-#include <xeno/pci.h>
-#include <xeno/spinlock.h>
-
-#include <xeno/interrupt.h> /* tasklet stuff */
-
-/*  #include <xeno/slab.h> */
-/*  #include <xeno/completion.h> */
-/*  #include <asm/semaphore.h> */
-#include <xeno/blk.h>
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+/*#include <linux/completion.h>*/
+/*#include <asm/semaphore.h>*/
+#include <linux/blk.h>
+#include <asm/uaccess.h>
+
+#include <xeno/interrupt.h>
 #include <xeno/delay.h>
+
 #include "scsi.h"
 #include "hosts.h"
 
  
 static int fib_map_alloc(struct aac_dev *dev)
 {
-    if((dev->hw_fib_va = 
-       pci_alloc_consistent(dev->pdev, sizeof(struct hw_fib) * AAC_NUM_FIB, 
-                            &dev->hw_fib_pa))==NULL)
-       return -ENOMEM;
-    return 0;
+       if((dev->hw_fib_va = pci_alloc_consistent(dev->pdev, sizeof(struct hw_fib) * AAC_NUM_FIB, &dev->hw_fib_pa))==NULL)
+               return -ENOMEM;
+       return 0;
 }
 
 /**
@@ -78,8 +77,7 @@ static int fib_map_alloc(struct aac_dev *dev)
 
 void fib_map_free(struct aac_dev *dev)
 {
-    pci_free_consistent(dev->pdev, sizeof(struct hw_fib) * AAC_NUM_FIB, 
-                       dev->hw_fib_va, dev->hw_fib_pa);
+       pci_free_consistent(dev->pdev, sizeof(struct hw_fib) * AAC_NUM_FIB, dev->hw_fib_va, dev->hw_fib_pa);
 }
 
 /**
@@ -92,45 +90,45 @@ void fib_map_free(struct aac_dev *dev)
 
 int fib_setup(struct aac_dev * dev)
 {
-    struct fib *fibptr;
-    struct hw_fib *fib;
-    dma_addr_t fibpa;
-    int i;
-    
-    if(fib_map_alloc(dev)<0)
-       return -ENOMEM;
-    
-    fib = dev->hw_fib_va;
-    fibpa = dev->hw_fib_pa;
-    memset(fib, 0, sizeof(struct hw_fib) * AAC_NUM_FIB);
-    /*
-     * Initialise the fibs
-     */
-    for (i = 0, fibptr = &dev->fibs[i]; i < AAC_NUM_FIB; i++, fibptr++) 
-    {
-       fibptr->dev = dev;
-       fibptr->fib = fib;
-       fibptr->data = (void *) fibptr->fib->data;
-       fibptr->next = fibptr+1;        /* Forward chain the fibs */
+       struct fib *fibptr;
+       struct hw_fib *hw_fib_va;
+       dma_addr_t hw_fib_pa;
+       int i;
+       
+       if(fib_map_alloc(dev)<0)
+               return -ENOMEM;
+               
+       hw_fib_va = dev->hw_fib_va;
+       hw_fib_pa = dev->hw_fib_pa;
+       memset(hw_fib_va, 0, sizeof(struct hw_fib) * AAC_NUM_FIB);
+       /*
+        *      Initialise the fibs
+        */
+       for (i = 0, fibptr = &dev->fibs[i]; i < AAC_NUM_FIB; i++, fibptr++) 
+       {
+               fibptr->dev = dev;
+               fibptr->hw_fib = hw_fib_va;
+               fibptr->data = (void *) fibptr->hw_fib->data;
+               fibptr->next = fibptr+1;        /* Forward chain the fibs */
 #if 0
-       init_MUTEX_LOCKED(&fibptr->event_wait);
+               init_MUTEX_LOCKED(&fibptr->event_wait);
 #endif
-       spin_lock_init(&fibptr->event_lock);
-       fib->header.XferState = cpu_to_le32(0xffffffff);
-       fib->header.SenderSize = cpu_to_le16(sizeof(struct hw_fib));
-       fibptr->logicaladdr = (unsigned long) fibpa;
-       fib = (struct hw_fib *)((unsigned char *)fib + sizeof(struct hw_fib));
-       fibpa = fibpa + sizeof(struct hw_fib);
-    }
-    /*
-     * Add the fib chain to the free list
-     */
-    dev->fibs[AAC_NUM_FIB-1].next = NULL;
-    /*
-     * Enable this to debug out of queue space
-     */
-    dev->free_fib = &dev->fibs[0];
-    return 0;
+               spin_lock_init(&fibptr->event_lock);
+               hw_fib_va->header.XferState = cpu_to_le32(0xffffffff);
+               hw_fib_va->header.SenderSize = cpu_to_le16(sizeof(struct hw_fib));
+               fibptr->hw_fib_pa = hw_fib_pa;
+               hw_fib_va = (struct hw_fib *)((unsigned char *)hw_fib_va + sizeof(struct hw_fib));
+               hw_fib_pa = hw_fib_pa + sizeof(struct hw_fib); 
+       }
+       /*
+        *      Add the fib chain to the free list
+        */
+       dev->fibs[AAC_NUM_FIB-1].next = NULL;
+       /*
+        *      Enable this to debug out of queue space
+        */
+       dev->free_fib = &dev->fibs[0];
+       return 0;
 }
 
 /**
@@ -143,29 +141,29 @@ int fib_setup(struct aac_dev * dev)
  
 struct fib * fib_alloc(struct aac_dev *dev)
 {
-    struct fib * fibptr;
-    unsigned long flags;
-    
-    spin_lock_irqsave(&dev->fib_lock, flags);
-    fibptr = dev->free_fib;    
-    if(!fibptr)
-       BUG();
-    dev->free_fib = fibptr->next;
-    spin_unlock_irqrestore(&dev->fib_lock, flags);
-    /*
-     * Set the proper node type code and node byte size
-     */
-    fibptr->type = FSAFS_NTC_FIB_CONTEXT;
-    fibptr->size = sizeof(struct fib);
-    /*
-     * Null out fields that depend on being zero at the start of
-     * each I/O
-     */
-    fibptr->fib->header.XferState = cpu_to_le32(0);
-    fibptr->callback = NULL;
-    fibptr->callback_data = NULL;
-    
-    return fibptr;
+       struct fib * fibptr;
+       unsigned long flags;
+       
+       spin_lock_irqsave(&dev->fib_lock, flags);
+       fibptr = dev->free_fib; 
+       if(!fibptr)
+               BUG();
+       dev->free_fib = fibptr->next;
+       spin_unlock_irqrestore(&dev->fib_lock, flags);
+       /*
+        *      Set the proper node type code and node byte size
+        */
+       fibptr->type = FSAFS_NTC_FIB_CONTEXT;
+       fibptr->size = sizeof(struct fib);
+       /*
+        *      Null out fields that depend on being zero at the start of
+        *      each I/O
+        */
+       fibptr->hw_fib->header.XferState = cpu_to_le32(0);
+       fibptr->callback = NULL;
+       fibptr->callback_data = NULL;
+
+       return fibptr;
 }
 
 /**
@@ -178,24 +176,23 @@ struct fib * fib_alloc(struct aac_dev *dev)
  
 void fib_free(struct fib * fibptr)
 {
-    unsigned long flags;
-    
-    spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
-    
-    if (fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT) {
-       aac_config.fib_timeouts++;
-       fibptr->next = fibptr->dev->timeout_fib;
-       fibptr->dev->timeout_fib = fibptr;
-    } else {
-       if (fibptr->fib->header.XferState != 0) {
-           printk(KERN_WARNING "fib_free, XferState != 0, "
-                  "fibptr = 0x%p, XferState = 0x%x\n", 
-                  (void *)fibptr, fibptr->fib->header.XferState);
-       }
-       fibptr->next = fibptr->dev->free_fib;
-       fibptr->dev->free_fib = fibptr;
-    }  
-    spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
+       unsigned long flags;
+
+       spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
+
+       if (fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT) {
+               aac_config.fib_timeouts++;
+               fibptr->next = fibptr->dev->timeout_fib;
+               fibptr->dev->timeout_fib = fibptr;
+       } else {
+               if (fibptr->hw_fib->header.XferState != 0) {
+                       printk(KERN_WARNING "fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n", 
+                                (void*)fibptr, fibptr->hw_fib->header.XferState);
+               }
+               fibptr->next = fibptr->dev->free_fib;
+               fibptr->dev->free_fib = fibptr;
+       }       
+       spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
 }
 
 /**
@@ -207,15 +204,14 @@ void fib_free(struct fib * fibptr)
  
 void fib_init(struct fib *fibptr)
 {
-    struct hw_fib *fib = fibptr->fib;
-    
-    fib->header.StructType = FIB_MAGIC;
-    fib->header.Size = cpu_to_le16(sizeof(struct hw_fib));
-    fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | 
-                                       FibEmpty | FastResponseCapable);
-    fib->header.SenderFibAddress = cpu_to_le32(0);
-    fib->header.ReceiverFibAddress = cpu_to_le32(0);
-    fib->header.SenderSize = cpu_to_le16(sizeof(struct hw_fib));
+       struct hw_fib *hw_fib = fibptr->hw_fib;
+
+       hw_fib->header.StructType = FIB_MAGIC;
+       hw_fib->header.Size = cpu_to_le16(sizeof(struct hw_fib));
+       hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
+       hw_fib->header.SenderFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
+       hw_fib->header.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
+       hw_fib->header.SenderSize = cpu_to_le16(sizeof(struct hw_fib));
 }
 
 /**
@@ -228,10 +224,10 @@ void fib_init(struct fib *fibptr)
  
 void fib_dealloc(struct fib * fibptr)
 {
-    struct hw_fib *fib = fibptr->fib;
-    if(fib->header.StructType != FIB_MAGIC) 
-       BUG();
-    fib->header.XferState = cpu_to_le32(0);        
+       struct hw_fib *hw_fib = fibptr->hw_fib;
+       if(hw_fib->header.StructType != FIB_MAGIC) 
+               BUG();
+       hw_fib->header.XferState = cpu_to_le32(0);        
 }
 
 /*
@@ -256,48 +252,47 @@ void fib_dealloc(struct fib * fibptr)
  
 static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
 {
-    struct aac_queue * q;
+       struct aac_queue * q;
 
-    /*
-     * All of the queues wrap when they reach the end, so we check
-     * to see if they have reached the end and if they have we just
-     * set the index back to zero. This is a wrap. You could or off
-     * the high bits in all updates but this is a bit faster I think.
-     */
+       /*
+        *      All of the queues wrap when they reach the end, so we check
+        *      to see if they have reached the end and if they have we just
+        *      set the index back to zero. This is a wrap. You could or off
+        *      the high bits in all updates but this is a bit faster I think.
+        */
 
-    q = &dev->queues->queue[qid];
+       q = &dev->queues->queue[qid];
        
-    *index = le32_to_cpu(*(q->headers.producer));
-    if (*index - 2 == le32_to_cpu(*(q->headers.consumer)))
-       *nonotify = 1; 
-
-    if (qid == AdapHighCmdQueue) {
-       if (*index >= ADAP_HIGH_CMD_ENTRIES)
-           *index = 0;
-    } else if (qid == AdapNormCmdQueue) {
-       if (*index >= ADAP_NORM_CMD_ENTRIES) 
-           *index = 0; /* Wrap to front of the Producer Queue. */
-    }
-    else if (qid == AdapHighRespQueue) 
-    {
-       if (*index >= ADAP_HIGH_RESP_ENTRIES)
-           *index = 0;
-    }
-    else if (qid == AdapNormRespQueue) 
-    {
-       if (*index >= ADAP_NORM_RESP_ENTRIES) 
-           *index = 0; /* Wrap to front of the Producer Queue. */
-    }
-    else BUG();
-
-    if (*index + 1 == le32_to_cpu(*(q->headers.consumer))) { /* Queue full */
-       printk(KERN_WARNING "Queue %d full, %ld outstanding.\n", 
-              qid, q->numpending);
-       return 0;
-    } else {
-       *entry = q->base + *index;
-       return 1;
-    }
+       *index = le32_to_cpu(*(q->headers.producer));
+       if ((*index - 2) == le32_to_cpu(*(q->headers.consumer)))
+                       *nonotify = 1; 
+
+       if (qid == AdapHighCmdQueue) {
+               if (*index >= ADAP_HIGH_CMD_ENTRIES)
+                       *index = 0;
+       } else if (qid == AdapNormCmdQueue) {
+               if (*index >= ADAP_NORM_CMD_ENTRIES) 
+                       *index = 0; /* Wrap to front of the Producer Queue. */
+       }
+       else if (qid == AdapHighRespQueue) 
+       {
+               if (*index >= ADAP_HIGH_RESP_ENTRIES)
+                       *index = 0;
+       }
+       else if (qid == AdapNormRespQueue) 
+       {
+               if (*index >= ADAP_NORM_RESP_ENTRIES) 
+                       *index = 0; /* Wrap to front of the Producer Queue. */
+       }
+       else BUG();
+
+        if (*index + 1 == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */
+               printk(KERN_WARNING "Queue %d full, %ld outstanding.\n", qid, q->numpending);
+               return 0;
+       } else {
+               *entry = q->base + *index;
+               return 1;
+       }
 }   
 
 /**
@@ -316,48 +311,49 @@ static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entr
  *     success.
  */
 
-static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * fib, int wait, struct fib * fibptr, unsigned long *nonotify)
+static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
 {
-    struct aac_entry * entry = NULL;
-    int map = 0;
-    struct aac_queue * q = &dev->queues->queue[qid];
+       struct aac_entry * entry = NULL;
+       int map = 0;
+       struct aac_queue * q = &dev->queues->queue[qid];
                
-    spin_lock_irqsave(q->lock, q->SavedIrql);
+       spin_lock_irqsave(q->lock, q->SavedIrql);
            
-    if (qid == AdapHighCmdQueue || qid == AdapNormCmdQueue) 
-    {
-       /*  if no entries wait for some if caller wants to */
-       while (!aac_get_entry(dev, qid, &entry, index, nonotify)) 
+       if (qid == AdapHighCmdQueue || qid == AdapNormCmdQueue) 
        {
-           printk(KERN_ERR "GetEntries failed\n");
+               /*  if no entries wait for some if caller wants to */
+               while (!aac_get_entry(dev, qid, &entry, index, nonotify)) 
+               {
+                       printk(KERN_ERR "GetEntries failed\n");
+               }
+               /*
+                *      Setup queue entry with a command, status and fib mapped
+                */
+               entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
+               map = 1;
        }
-       /*
-        *      Setup queue entry with a command, status and fib mapped
-        */
-       entry->size = cpu_to_le32(le16_to_cpu(fib->header.Size));
-       map = 1;
-    }
-    else if (qid == AdapHighRespQueue || qid == AdapNormRespQueue)
-    {
-       while(!aac_get_entry(dev, qid, &entry, index, nonotify)) 
+       else if (qid == AdapHighRespQueue || qid == AdapNormRespQueue)
        {
-           /* if no entries wait for some if caller wants to */
-       }
+               while(!aac_get_entry(dev, qid, &entry, index, nonotify)) 
+               {
+                       /* if no entries wait for some if caller wants to */
+               }
+               /*
+                *      Setup queue entry with command, status and fib mapped
+                */
+               entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
+               entry->addr = hw_fib->header.SenderFibAddress;
+                       /* Restore adapters pointer to the FIB */
+               hw_fib->header.ReceiverFibAddress = hw_fib->header.SenderFibAddress;    /* Let the adapter know where to find its data */
+               map = 0;
+       } 
        /*
-        *      Setup queue entry with command, status and fib mapped
+        *      If MapFib is true then we need to map the Fib and put pointers
+        *      in the queue entry.
         */
-       entry->size = cpu_to_le32(le16_to_cpu(fib->header.Size));
-       entry->addr = cpu_to_le32(fib->header.SenderFibAddress);                /* Restore adapters pointer to the FIB */
-       fib->header.ReceiverFibAddress = fib->header.SenderFibAddress;          /* Let the adapter now where to find its data */
-       map = 0;
-    } 
-    /*
-     * If MapFib is true than we need to map the Fib and put pointers
-     * in the queue entry.
-     */
-    if (map)
-       entry->addr = cpu_to_le32((unsigned long)(fibptr->logicaladdr));
-    return 0;
+       if (map)
+               entry->addr = fibptr->hw_fib_pa;
+       return 0;
 }
 
 
@@ -376,24 +372,24 @@ static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_f
  
 static int aac_insert_entry(struct aac_dev * dev, u32 index, u32 qid, unsigned long nonotify) 
 {
-    struct aac_queue * q = &dev->queues->queue[qid];
-
-    if(q == NULL)
-       BUG();
-    *(q->headers.producer) = cpu_to_le32(index + 1);
-    spin_unlock_irqrestore(q->lock, q->SavedIrql);
-
-    if (qid == AdapHighCmdQueue ||
-       qid == AdapNormCmdQueue ||
-       qid == AdapHighRespQueue ||
-       qid == AdapNormRespQueue)
-    {
-       if (!nonotify)
-           aac_adapter_notify(dev, qid);
-    }
-    else
-       printk("Suprise insert!\n");
-    return 0;
+       struct aac_queue * q = &dev->queues->queue[qid];
+
+       if(q == NULL)
+               BUG();
+       *(q->headers.producer) = cpu_to_le32(index + 1);
+       spin_unlock_irqrestore(q->lock, q->SavedIrql);
+
+       if (qid == AdapHighCmdQueue ||
+           qid == AdapNormCmdQueue ||
+           qid == AdapHighRespQueue ||
+           qid == AdapNormRespQueue)
+       {
+               if (!nonotify)
+                       aac_adapter_notify(dev, qid);
+       }
+       else
+               printk("Suprise insert!\n");
+       return 0;
 }
 
 /*
@@ -423,149 +419,141 @@ static int aac_insert_entry(struct aac_dev * dev, u32 index, u32 qid, unsigned l
  
 int fib_send(u16 command, struct fib * fibptr, unsigned long size,  int priority, int wait, int reply, fib_callback callback, void * callback_data)
 {
-    u32 index;
-    u32 qid;
-    struct aac_dev * dev = fibptr->dev;
-    unsigned long nointr = 0;
-    struct hw_fib * fib = fibptr->fib;
-    struct aac_queue * q;
-    unsigned long flags = 0;
-
-    if (!(le32_to_cpu(fib->header.XferState) & HostOwned))
-       return -EBUSY;
-    /*
-     * There are 5 cases with the wait and reponse requested flags. 
-     * The only invalid cases are if the caller requests to wait and
-     * does not request a response and if the caller does not want a
-     * response and the Fibis not allocated from pool. If a response
-     * is not requesed the Fib will just be deallocaed by the DPC
-     * routine when the response comes back from the adapter. No
-     * further processing will be done besides deleting the Fib. We 
-     * will have a debug mode where the adapter can notify the host
-     * it had a problem and the host can log that fact.
-     */
-    if (wait && !reply) {
-       return -EINVAL;
-    } else if (!wait && reply) {
-       fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
-       FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
-    } else if (!wait && !reply) {
-       fib->header.XferState |= cpu_to_le32(NoResponseExpected);
-       FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
-    } else if (wait && reply) {
-       fib->header.XferState |= cpu_to_le32(ResponseExpected);
-       FIB_COUNTER_INCREMENT(aac_config.NormalSent);
-    } 
-    /*
-     * Map the fib into 32bits by using the fib number
-     */
-    fib->header.SenderData = fibptr-&dev->fibs[0];     /* for callback */
-    /*
-     * Set FIB state to indicate where it came from and if we want a
-     * response from the adapter. Also load the command from the
-     * caller.
-     *
-     * Map the hw fib pointer as a 32bit value
-     */
-    fib->header.SenderFibAddress = fib2addr(fib);
-    fib->header.Command = cpu_to_le16(command);
-    fib->header.XferState |= cpu_to_le32(SentFromHost);
-    fibptr->fib->header.Flags = 0; /* Zero flags field - its internal only */
-    /*
-     * Set the size of the Fib we want to send to the adapter
-     */
-    fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
-    if (le16_to_cpu(fib->header.Size) > le16_to_cpu(fib->header.SenderSize)) {
-       return -EMSGSIZE;
-    }                
-    /*
-     * Get a queue entry connect the FIB to it and send an notify
-     * the adapter a command is ready.
-     */
-    if (priority == FsaHigh) {
-       fib->header.XferState |= cpu_to_le32(HighPriority);
-       qid = AdapHighCmdQueue;
-    } else {
-       fib->header.XferState |= cpu_to_le32(NormalPriority);
-       qid = AdapNormCmdQueue;
-    }
-    q = &dev->queues->queue[qid];
-
-    if(wait)
-       spin_lock_irqsave(&fibptr->event_lock, flags);
-
-    if(aac_queue_get( dev, &index, qid, fib, 1, fibptr, &nointr)<0)
-       return -EWOULDBLOCK;
-    dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",
-            index));
-    dprintk((KERN_DEBUG "Fib contents:.\n"));
-    dprintk((KERN_DEBUG "  Command =               %d.\n", 
-            fib->header.Command));
-    dprintk((KERN_DEBUG "  XferState  =            %x.\n", 
-            fib->header.XferState));
-    /*
-     * Fill in the Callback and CallbackContext if we are not
-     * going to wait.
-     */
-    if (!wait) {
-       fibptr->callback = callback;
-       fibptr->callback_data = callback_data;
-    }
-    FIB_COUNTER_INCREMENT(aac_config.FibsSent);
-    list_add_tail(&fibptr->queue, &q->pendingq);
-    q->numpending++;
-
-    fibptr->done = 0;
-
-    if(aac_insert_entry(dev, index, qid, 
-                       (nointr & aac_config.irq_mod)) < 0)
-       return -EWOULDBLOCK;
-    /*
-     * If the caller wanted us to wait for response wait now. 
-     */
+       u32 index;
+       u32 qid;
+       struct aac_dev * dev = fibptr->dev;
+       unsigned long nointr = 0;
+       struct hw_fib * hw_fib = fibptr->hw_fib;
+       struct aac_queue * q;
+       unsigned long flags = 0;
+
+       if (!(le32_to_cpu(hw_fib->header.XferState) & HostOwned))
+               return -EBUSY;
+       /*
+        *      There are 5 cases with the wait and response requested flags. 
+        *      The only invalid cases are if the caller requests to wait and
+        *      does not request a response and if the caller does not want a
+        *      response and the Fib is not allocated from pool. If a response
+        *      is not requested the Fib will just be deallocated by the DPC
+        *      routine when the response comes back from the adapter. No
+        *      further processing will be done besides deleting the Fib. We 
+        *      will have a debug mode where the adapter can notify the host
+        *      it had a problem and the host can log that fact.
+        */
+       if (wait && !reply) {
+               return -EINVAL;
+       } else if (!wait && reply) {
+               hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
+               FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
+       } else if (!wait && !reply) {
+               hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
+               FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
+       } else if (wait && reply) {
+               hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
+               FIB_COUNTER_INCREMENT(aac_config.NormalSent);
+       } 
+       /*
+        *      Map the fib into 32bits by using the fib number
+        */
+
+//     hw_fib->header.SenderFibAddress = ((u32)(fibptr-dev->fibs)) << 1;
+       hw_fib->header.SenderFibAddress = cpu_to_le32((u32)(ulong)fibptr->hw_fib_pa);
+       hw_fib->header.SenderData = (u32)(fibptr - dev->fibs);
+       /*
+        *      Set FIB state to indicate where it came from and if we want a
+        *      response from the adapter. Also load the command from the
+        *      caller.
+        *
+        *      Map the hw fib pointer as a 32bit value
+        */
+       hw_fib->header.Command = cpu_to_le16(command);
+       hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
+       fibptr->hw_fib->header.Flags = 0;       /* 0 the flags field - internal only*/
+       /*
+        *      Set the size of the Fib we want to send to the adapter
+        */
+       hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
+       if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
+               return -EMSGSIZE;
+       }                
+       /*
+        *      Get a queue entry, connect the FIB to it, and notify
+        *      the adapter a command is ready.
+        */
+       if (priority == FsaHigh) {
+               hw_fib->header.XferState |= cpu_to_le32(HighPriority);
+               qid = AdapHighCmdQueue;
+       } else {
+               hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
+               qid = AdapNormCmdQueue;
+       }
+       q = &dev->queues->queue[qid];
+
+       if(wait)
+               spin_lock_irqsave(&fibptr->event_lock, flags);
+       if(aac_queue_get( dev, &index, qid, hw_fib, 1, fibptr, &nointr)<0)
+               return -EWOULDBLOCK;
+       dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index));
+       dprintk((KERN_DEBUG "Fib contents:.\n"));
+       dprintk((KERN_DEBUG "  Command =               %d.\n", hw_fib->header.Command));
+       dprintk((KERN_DEBUG "  XferState  =            %x.\n", hw_fib->header.XferState));
+       dprintk((KERN_DEBUG "  hw_fib va being sent=%p\n",fibptr->hw_fib));
+       dprintk((KERN_DEBUG "  hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
+       dprintk((KERN_DEBUG "  fib being sent=%p\n",fibptr));
+       /*
+        *      Fill in the Callback and CallbackContext if we are not
+        *      going to wait.
+        */
+       if (!wait) {
+               fibptr->callback = callback;
+               fibptr->callback_data = callback_data;
+       }
+       FIB_COUNTER_INCREMENT(aac_config.FibsSent);
+       list_add_tail(&fibptr->queue, &q->pendingq);
+       q->numpending++;
+
+       fibptr->done = 0;
+       fibptr->flags = 0;
+
+       if(aac_insert_entry(dev, index, qid, (nointr & aac_config.irq_mod)) < 0)
+               return -EWOULDBLOCK;
+       /*
+        *      If the caller wanted us to wait for response wait now. 
+        */
     
-    if (wait) {
-       spin_unlock_irqrestore(&fibptr->event_lock, flags);
+       if (wait) {
+               spin_unlock_irqrestore(&fibptr->event_lock, flags);
 #if 0
-       down(&fibptr->event_wait);
+               down(&fibptr->event_wait);
+               if(fibptr->done == 0)
+                       BUG();
 #endif
 #ifdef TRY_TASKLET
-        /*
-         * XXX KAF: Well, this is pretty gross. We should probably
-         * do_softirq() after scheduling the tasklet, as long as we
-         * are _sure_ we hold no locks here...
-         */
-//     printk("about to softirq aac_command_thread...\n"); 
        while (!fibptr->done) { 
-            tasklet_schedule(&aac_command_tasklet);
-           do_softirq(); /* force execution */
-//         mdelay(100); 
+               tasklet_schedule(&aac_command_tasklet);
+               do_softirq(); /* force execution */
        }
-//     printk("back from softirq cmd thread and fibptr->done!\n"); 
 #else 
-       printk("about to bail at aac_command_thread...\n"); 
        while (!fibptr->done) { 
-           mdelay(100); 
-           aac_command_thread(dev); 
+               mdelay(100); 
+               aac_command_thread(dev); 
        }
-       printk("back from command thread and fibptr->done!\n"); 
 #endif
-/*  if(fibptr->done == 0) */
-/*                     BUG(); */
+
                        
-       if((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
-           return -ETIMEDOUT;
+               if((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
+                       return -ETIMEDOUT;
+               else
+                       return 0;
+       }
+       /*
+        *      If the user does not want a response then return success otherwise
+        *      return pending
+        */
+       if (reply)
+               return -EINPROGRESS;
        else
-           return 0;
-    }
-    /*
-     * If the user does not want a response than return success otherwise
-     * return pending
-     */
-    if (reply)
-       return -EINPROGRESS;
-    else
-       return 0;
+               return 0;
 }
 
 /** 
@@ -574,37 +562,36 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size,  int priority
  *     @q: Queue
  *     @entry: Return entry
  *
- *      Will return a pointer to the entry on the top of the queue
- *     requested that we are a consumer of, and return the address of
- *     the queue entry. It does * not change the state of the queue.
+ *     Will return a pointer to the entry on the top of the queue requested that
+ *     we are a consumer of, and return the address of the queue entry. It does
+ *     not change the state of the queue. 
  */
 
 int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
 {
-    u32 index;
-    int status;
-
-    if (*q->headers.producer == *q->headers.consumer) {
-       status = 0;
-    } else {
-       /*
-        *      The consumer index must be wrapped if we have reached
-        *      the end of the queue, else we just use the entry
-        *      pointed to by the header index
-        */
-       if (le32_to_cpu(*q->headers.consumer) >= q->entries) 
-           index = 0;          
-       else
-           index = le32_to_cpu(*q->headers.consumer);
-       *entry = q->base + index;
-       status = 1;
-    }
-    return(status);
+       u32 index;
+       int status;
+       if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
+               status = 0;
+       } else {
+               /*
+                *      The consumer index must be wrapped if we have reached
+                *      the end of the queue, else we just use the entry
+                *      pointed to by the header index
+                */
+               if (le32_to_cpu(*q->headers.consumer) >= q->entries) 
+                       index = 0;              
+               else
+                       index = le32_to_cpu(*q->headers.consumer);
+               *entry = q->base + index;
+               status = 1;
+       }
+       return(status);
 }
 
 int aac_consumer_avail(struct aac_dev *dev, struct aac_queue * q)
 {
-    return (*q->headers.producer != *q->headers.consumer);
+       return (le32_to_cpu(*q->headers.producer) != le32_to_cpu(*q->headers.consumer));
 }
 
 
@@ -620,39 +607,38 @@ int aac_consumer_avail(struct aac_dev *dev, struct aac_queue * q)
 
 void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
 {
-    int wasfull = 0;
-    u32 notify;
+       int wasfull = 0;
+       u32 notify;
 
-    if (*q->headers.producer+1 == *q->headers.consumer)
-       wasfull = 1;
+       if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
+               wasfull = 1;
         
-    if (le32_to_cpu(*q->headers.consumer) >= q->entries)
-       *q->headers.consumer = cpu_to_le32(1);
-    else
-       *q->headers.consumer = 
-           cpu_to_le32(le32_to_cpu(*q->headers.consumer)+1);
+       if (le32_to_cpu(*q->headers.consumer) >= q->entries)
+               *q->headers.consumer = cpu_to_le32(1);
+       else
+               *q->headers.consumer = cpu_to_le32(le32_to_cpu(*q->headers.consumer)+1);
         
-    if (wasfull) {
-       switch (qid) {
-
-       case HostNormCmdQueue:
-           notify = HostNormCmdNotFull;
-           break;
-       case HostHighCmdQueue:
-           notify = HostHighCmdNotFull;
-           break;
-       case HostNormRespQueue:
-           notify = HostNormRespNotFull;
-           break;
-       case HostHighRespQueue:
-           notify = HostHighRespNotFull;
-           break;
-       default:
-           BUG();
-           return;
+       if (wasfull) {
+               switch (qid) {
+
+               case HostNormCmdQueue:
+                       notify = HostNormCmdNotFull;
+                       break;
+               case HostHighCmdQueue:
+                       notify = HostHighCmdNotFull;
+                       break;
+               case HostNormRespQueue:
+                       notify = HostNormRespNotFull;
+                       break;
+               case HostHighRespQueue:
+                       notify = HostHighRespNotFull;
+                       break;
+               default:
+                       BUG();
+                       return;
+               }
+               aac_adapter_notify(dev, notify);
        }
-       aac_adapter_notify(dev, notify);
-    }
 }        
 
 /**
@@ -666,70 +652,65 @@ void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
 
 int fib_adapter_complete(struct fib * fibptr, unsigned short size)
 {
-    struct hw_fib * fib = fibptr->fib;
-    struct aac_dev * dev = fibptr->dev;
-    unsigned long nointr = 0;
-
-    if (le32_to_cpu(fib->header.XferState) == 0)
-       return 0;
-    /*
-     * If we plan to do anything check the structure type first.
-     */ 
-    if ( fib->header.StructType != FIB_MAGIC ) {
-       return -EINVAL;
-    }
-    /*
-     * This block handles the case where the adapter had sent us a
-     * command and we have finished processing the command. We
-     * call completeFib when we are done processing the command 
-     * and want to send a response back to the adapter. This will 
-     * send the completed cdb to the adapter.
-     */
-    if (fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
-       fib->header.XferState |= cpu_to_le32(HostProcessed);
-       if (fib->header.XferState & cpu_to_le32(HighPriority)) {
-           u32 index;
-           if (size) 
-           {
-               size += sizeof(struct aac_fibhdr);
-               if (size > le16_to_cpu(fib->header.SenderSize))
-                   return -EMSGSIZE;
-               fib->header.Size = cpu_to_le16(size);
-           }
-           if(aac_queue_get(dev, &index, AdapHighRespQueue, 
-                            fib, 1, NULL, &nointr) < 0) {
-               return -EWOULDBLOCK;
-           }
-           if (aac_insert_entry(dev, index, AdapHighRespQueue,  
-                                (nointr & (int)aac_config.irq_mod)) != 0) {
-           }
+       struct hw_fib * hw_fib = fibptr->hw_fib;
+       struct aac_dev * dev = fibptr->dev;
+       unsigned long nointr = 0;
+       if (le32_to_cpu(hw_fib->header.XferState) == 0)
+               return 0;
+       /*
+        *      If we plan to do anything check the structure type first.
+        */ 
+       if ( hw_fib->header.StructType != FIB_MAGIC ) {
+               return -EINVAL;
        }
-       else if (fib->header.XferState & NormalPriority) 
-       {
-           u32 index;
-
-           if (size) {
-               size += sizeof(struct aac_fibhdr);
-               if (size > le16_to_cpu(fib->header.SenderSize)) 
-                   return -EMSGSIZE;
-               fib->header.Size = cpu_to_le16(size);
-           }
-           if (aac_queue_get(dev, &index, AdapNormRespQueue, 
-                             fib, 1, NULL, &nointr) < 0) 
-               return -EWOULDBLOCK;
-           if (aac_insert_entry(dev, index, AdapNormRespQueue, 
-                                (nointr & (int)aac_config.irq_mod)) != 0) 
-           {
-           }
+       /*
+        *      This block handles the case where the adapter had sent us a
+        *      command and we have finished processing the command. We
+        *      call completeFib when we are done processing the command 
+        *      and want to send a response back to the adapter. This will 
+        *      send the completed cdb to the adapter.
+        */
+       if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
+               hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
+               if (hw_fib->header.XferState & cpu_to_le32(HighPriority)) {
+                       u32 index;
+                               if (size) 
+                       {
+                               size += sizeof(struct aac_fibhdr);
+                               if (size > le16_to_cpu(hw_fib->header.SenderSize))
+                                       return -EMSGSIZE;
+                               hw_fib->header.Size = cpu_to_le16(size);
+                       }
+                       if(aac_queue_get(dev, &index, AdapHighRespQueue, hw_fib, 1, NULL, &nointr) < 0) {
+                               return -EWOULDBLOCK;
+                       }
+                       if (aac_insert_entry(dev, index, AdapHighRespQueue,  (nointr & (int)aac_config.irq_mod)) != 0) {
+                       }
+               }
+               else if (hw_fib->header.XferState & NormalPriority) 
+               {
+                       u32 index;
+
+                       if (size) {
+                               size += sizeof(struct aac_fibhdr);
+                               if (size > le16_to_cpu(hw_fib->header.SenderSize)) 
+                                       return -EMSGSIZE;
+                               hw_fib->header.Size = cpu_to_le16(size);
+                       }
+                       if (aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr) < 0) 
+                               return -EWOULDBLOCK;
+                       if (aac_insert_entry(dev, index, AdapNormRespQueue, 
+                               (nointr & (int)aac_config.irq_mod)) != 0) 
+                       {
+                       }
+               }
        }
-    }
-    else 
-    {
-       printk(KERN_WARNING 
-              "fib_adapter_complete: Unknown xferstate detected.\n");
-       BUG();
-    }   
-    return 0;
+       else 
+       {
+               printk(KERN_WARNING "fib_adapter_complete: Unknown xferstate detected.\n");
+               BUG();
+       }   
+       return 0;
 }
 
 /**
@@ -741,44 +722,44 @@ int fib_adapter_complete(struct fib * fibptr, unsigned short size)
  
 int fib_complete(struct fib * fibptr)
 {
-    struct hw_fib * fib = fibptr->fib;
+       struct hw_fib * hw_fib = fibptr->hw_fib;
 
-    /*
-     * Check for a fib which has already been completed
-     */
+       /*
+        *      Check for a fib which has already been completed
+        */
 
-    if (fib->header.XferState == cpu_to_le32(0))
-       return 0;
-    /*
-     * If we plan to do anything check the structure type first.
-     */ 
-
-    if (fib->header.StructType != FIB_MAGIC)
-       return -EINVAL;
-    /*
-     * This block completes a cdb which orginated on the host and we 
-     * just need to deallocate the cdb or reinit it. At this point the
-     * command is complete that we had sent to the adapter and this
-     * cdb could be reused.
-     */
-    if((fib->header.XferState & cpu_to_le32(SentFromHost)) &&
-       (fib->header.XferState & cpu_to_le32(AdapterProcessed)))
-    {
-       fib_dealloc(fibptr);
-    }
-    else if(fib->header.XferState & cpu_to_le32(SentFromHost))
-    {
+       if (hw_fib->header.XferState == cpu_to_le32(0))
+               return 0;
+       /*
+        *      If we plan to do anything check the structure type first.
+        */ 
+
+       if (hw_fib->header.StructType != FIB_MAGIC)
+               return -EINVAL;
        /*
-        *      This handles the case when the host has aborted the I/O
-        *      to the adapter because the adapter is not responding
+        *      This block completes a cdb which originated on the host and we 
+        *      just need to deallocate the cdb or reinit it. At this point the
+        *      command is complete that we had sent to the adapter and this
+        *      cdb could be reused.
         */
-       fib_dealloc(fibptr);
-    } else if(fib->header.XferState & cpu_to_le32(HostOwned)) {
-       fib_dealloc(fibptr);
-    } else {
-       BUG();
-    }   
-    return 0;
+       if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
+               (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
+       {
+               fib_dealloc(fibptr);
+       }
+       else if(hw_fib->header.XferState & cpu_to_le32(SentFromHost))
+       {
+               /*
+                *      This handles the case when the host has aborted the I/O
+                *      to the adapter because the adapter is not responding
+                */
+               fib_dealloc(fibptr);
+       } else if(hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
+               fib_dealloc(fibptr);
+       } else {
+               BUG();
+       }   
+       return 0;
 }
 
 /**
@@ -792,23 +773,23 @@ int fib_complete(struct fib * fibptr)
 
 void aac_printf(struct aac_dev *dev, u32 val)
 {
-    int length = val & 0xffff;
-    int level = (val >> 16) & 0xffff;
-    char *cp = dev->printfbuf;
+       int length = val & 0xffff;
+       int level = (val >> 16) & 0xffff;
+       char *cp = dev->printfbuf;
        
-    /*
-     * The size of the printfbuf is set in port.c
-     * There is no variable or define for it
-     */
-    if (length > 255)
-       length = 255;
-    if (cp[length] != 0)
-       cp[length] = 0;
-    if (level == LOG_HIGH_ERROR)
-       printk(KERN_WARNING "aacraid:%s", cp);
-    else
-       printk(KERN_INFO "aacraid:%s", cp);
-    memset(cp, 0,  256);
+       /*
+        *      The size of the printfbuf is set in port.c
+        *      There is no variable or define for it
+        */
+       if (length > 255)
+               length = 255;
+       if (cp[length] != 0)
+               cp[length] = 0;
+       if (level == LOG_HIGH_ERROR)
+               printk(KERN_WARNING "aacraid:%s", cp);
+       else
+               printk(KERN_INFO "aacraid:%s", cp);
+       memset(cp, 0,  256);
 }
 
 
@@ -821,16 +802,117 @@ void aac_printf(struct aac_dev *dev, u32 val)
  *     dispatches it to the appropriate routine for handling.
  */
 
+#define CONTAINER_TO_BUS(cont)         (0)
+#define CONTAINER_TO_TARGET(cont)      ((cont))
+#define CONTAINER_TO_LUN(cont)         (0)
+
 static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
 {
-    struct hw_fib * fib = fibptr->fib;
-    /*
-     * Set the status of this FIB to be Invalid parameter.
-     *
-     * *(u32 *)fib->data = ST_INVAL;
-     */
-    *(u32 *)fib->data = cpu_to_le32(ST_OK);
-    fib_adapter_complete(fibptr, sizeof(u32));
+#if 0
+       struct hw_fib * hw_fib = fibptr->hw_fib;
+       struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
+       int busy;
+       u32 container;
+       mm_segment_t fs;
+
+       /* Sniff for container changes */
+       dprintk ((KERN_INFO "AifCmdDriverNotify=%x\n", le32_to_cpu(*(u32 *)aifcmd->data)));
+       switch (le32_to_cpu(*(u32 *)aifcmd->data)) {
+       case AifDenMorphComplete:
+       case AifDenVolumeExtendComplete:
+       case AifEnContainerChange: /* Not really a driver notify Event */
+
+               busy = 0;
+               container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
+               dprintk ((KERN_INFO "container=%d(%d,%d,%d,%d) ",
+                 container,
+                 (dev && dev->scsi_host_ptr)
+                   ? dev->scsi_host_ptr->host_no
+                   : -1,
+                 CONTAINER_TO_BUS(container),
+                 CONTAINER_TO_TARGET(container),
+                 CONTAINER_TO_LUN(container)));
+
+               /*
+                *      Find the Scsi_Device associated with the SCSI address,
+                * and mark it as changed, invalidating the cache. This deals
+                * with changes to existing device IDs.
+                */
+
+               if ((dev != (struct aac_dev *)NULL)
+                && (dev->scsi_host_ptr != (struct Scsi_Host *)NULL)) {
+                       Scsi_Device * device;
+
+                       for (device = dev->scsi_host_ptr->host_queue;
+                         device != (Scsi_Device *)NULL;
+                         device = device->next) {
+                               dprintk((KERN_INFO
+                                 "aifd: device (%d,%d,%d,%d)?\n",
+                                 dev->scsi_host_ptr->host_no,
+                                 device->channel,
+                                 device->id,
+                                 device->lun));
+                               if ((device->channel == CONTAINER_TO_BUS(container))
+                                && (device->id == CONTAINER_TO_TARGET(container))
+                                && (device->lun == CONTAINER_TO_LUN(container))) {
+                                       busy |= (device->access_count != 0);
+                                       if (busy == 0) {
+                                               device->removable = TRUE;
+                                       }
+                               }
+                       }
+               }
+               dprintk (("busy=%d\n", busy));
+
+               /*
+                * if (busy == 0) {
+                *      scan_scsis(dev->scsi_host_ptr, 1,
+                *        CONTAINER_TO_BUS(container),
+                *        CONTAINER_TO_TARGET(container),
+                *        CONTAINER_TO_LUN(container));
+                * }
+                * is not exported as accessible, so we need to go around it
+                * another way. So, we look for the "proc/scsi/scsi" entry in
+                * the proc filesystem (using proc_scsi as a shortcut) and send
+                * it a message. This deals with new devices that have
+                * appeared. If the device has gone offline, scan_scsis will
+                * also discover this, but we do not want the device to
+                * go away. We need to check the access_count for the
+                * device since we are not wanting the devices to go away.
+                */
+               if (busy == 0 && proc_scsi != NULL) {
+                       struct proc_dir_entry * entry;
+
+                       dprintk((KERN_INFO "proc_scsi=%p ", proc_scsi));
+                       for (entry = proc_scsi->subdir; entry != (struct proc_dir_entry *)NULL; entry = entry->next) {
+                               dprintk(("\"%.*s\"[%d]=%x ", entry->namelen,
+                                 entry->name, entry->namelen, entry->low_ino));
+                               if ((entry->low_ino != 0) && (entry->namelen == 4) && (memcmp ("scsi", entry->name, 4) == 0)) {
+                                       dprintk(("%p->write_proc=%p ", entry, entry->write_proc));
+                                       if (entry->write_proc != (int (*)(struct file *, const char *, unsigned long, void *))NULL) {
+                                               char buffer[80];
+                                               int length;
+
+                                               sprintf (buffer,
+                                                 "scsi add-single-device %d %d %d %d\n",
+                                                 dev->scsi_host_ptr->host_no,
+                                                 CONTAINER_TO_BUS(container),
+                                                 CONTAINER_TO_TARGET(container),
+                                                 CONTAINER_TO_LUN(container));
+                                               length = strlen (buffer);
+                                               dprintk((KERN_INFO "echo %.*s > /proc/scsi/scsi\n", length-1, buffer));
+                                               fs = get_fs();
+                                               set_fs(get_ds());
+                                               length = entry->write_proc(NULL, buffer, length, NULL);
+                                               set_fs(fs);
+                                               dprintk((KERN_INFO "returns %d\n", length));
+                                       }
+                                       break;
+                               }
+                       }
+               }
+       }
+#endif
 }
 
 /**
@@ -842,7 +924,6 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
  *     until the queue is empty. When the queue is empty it will wait for
  *     more FIBs.
  */
 #ifndef TRY_TASKLET
 int aac_command_thread(struct aac_dev * dev)
 {
@@ -850,176 +931,193 @@ int aac_command_thread(struct aac_dev * dev)
 DECLARE_TASKLET_DISABLED(aac_command_tasklet, aac_command_thread, 0);
 void aac_command_thread(unsigned long data)
 #define return(_x) return 
-{   
-    struct aac_dev *dev = (struct aac_dev *)data; 
+{
+       struct aac_dev *dev = (struct aac_dev *)data; 
 #endif
-    struct hw_fib *fib, *newfib;
-    struct fib fibptr; /* for error logging */
-    struct aac_queue_block *queues = dev->queues;
-    struct aac_fib_context *fibctx;
-    unsigned long flags;
+       struct hw_fib *hw_fib, *hw_newfib;
+       struct fib *fib, *newfib;
+       struct aac_queue_block *queues = dev->queues;
+       struct aac_fib_context *fibctx;
+       unsigned long flags;
 #if 0
-    DECLARE_WAITQUEUE(wait, current);
+       DECLARE_WAITQUEUE(wait, current);
 #endif
 
-    /*
-     * We can only have one thread per adapter for AIF's.
-     */
-    if (dev->aif_thread)
-       return(-EINVAL);
-
+       /*
+        *      We can only have one thread per adapter for AIF's.
+        */
+       if (dev->aif_thread)
+               return(-EINVAL);
 #if 0
-    /*
-     * Set up the name that will appear in 'ps'
-     * stored in  task_struct.comm[16].
-     */
-    sprintf(current->comm, "aacraid");
-    daemonize();
+       /*
+        *      Set up the name that will appear in 'ps'
+        *      stored in  task_struct.comm[16].
+        */
+       sprintf(current->comm, "aacraid");
+       daemonize();
 #endif
-
-    /*
-     * Let the DPC know it has a place to send the AIF's to.
-     */
-    dev->aif_thread = 1;
-    memset(&fibptr, 0, sizeof(struct fib));
+       /*
+        *      Let the DPC know it has a place to send the AIF's to.
+        */
+       dev->aif_thread = 1;
 #if 0
-    add_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
-    set_current_state(TASK_INTERRUPTIBLE);
+       add_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
+       set_current_state(TASK_INTERRUPTIBLE);
+       dprintk ((KERN_INFO "aac_command_thread start\n"));
+       while(1) 
 #endif
-//    while(1) 
-    {
-
-       spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
-       while(!list_empty(&(queues->queue[HostNormCmdQueue].cmdq))) {
-           struct list_head *entry;
-           struct aac_aifcmd * aifcmd;
+       {
+               spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
+               while(!list_empty(&(queues->queue[HostNormCmdQueue].cmdq))) {
+                       struct list_head *entry;
+                       struct aac_aifcmd * aifcmd;
 
 #if 0
-           set_current_state(TASK_RUNNING);
+                       set_current_state(TASK_RUNNING);
 #endif
 
-               
-           entry = queues->queue[HostNormCmdQueue].cmdq.next;
-           list_del(entry);
+                       entry = queues->queue[HostNormCmdQueue].cmdq.next;
+                       list_del(entry);
+       
+                       spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
+                       fib = list_entry(entry, struct fib, fiblink);
+                       /*
+                        *      We will process the FIB here or pass it to a 
+                        *      worker thread that is TBD. We Really can't 
+                        *      do anything at this point since we don't have
+                        *      anything defined for this thread to do.
+                        */
+                       hw_fib = fib->hw_fib;
                        
-           spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock,flags);
-           fib = list_entry(entry, struct hw_fib, header.FibLinks);
-           /*
-            *  We will process the FIB here or pass it to a 
-            *  worker thread that is TBD. We Really can't 
-            *  do anything at this point since we don't have
-            *  anything defined for this thread to do.
-            */
-           memset(&fibptr, 0, sizeof(struct fib));
-           fibptr.type = FSAFS_NTC_FIB_CONTEXT;
-           fibptr.size = sizeof( struct fib );
-           fibptr.fib = fib;
-           fibptr.data = fib->data;
-           fibptr.dev = dev;
-           /*
-            *  We only handle AifRequest fibs from the adapter.
-            */
-           aifcmd = (struct aac_aifcmd *) fib->data;
-           if (aifcmd->command == le16_to_cpu(AifCmdDriverNotify)) {
-               aac_handle_aif(dev, &fibptr);
-           } else {
-               /* The u32 here is important and intended. We are using
-                  32bit wrapping time to fit the adapter field */
-               u32 time_now, time_last;
-               unsigned long flagv;
-               
-               time_now = jiffies/HZ;
-
-               spin_lock_irqsave(&dev->fib_lock, flagv);
-               entry = dev->fib_list.next;
+                       memset(fib, 0, sizeof(struct fib));
+                       fib->type = FSAFS_NTC_FIB_CONTEXT;
+                       fib->size = sizeof( struct fib );
+                       fib->hw_fib = hw_fib;
+                       fib->data = hw_fib->data;
+                       fib->dev = dev;
+                       /*
+                        *      We only handle AifRequest fibs from the adapter.
+                        */
+                       aifcmd = (struct aac_aifcmd *) hw_fib->data;
+                       if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
+                               /* Handle Driver Notify Events */
+                               aac_handle_aif(dev, fib);
+                               *(u32 *)hw_fib->data = cpu_to_le32(ST_OK);
+                               fib_adapter_complete(fib, sizeof(u32));
+                       } else {
+                               struct list_head *entry;
+                               /* The u32 here is important and intended. We are using
+                                  32bit wrapping time to fit the adapter field */
+                                  
+                               u32 time_now, time_last;
+                               unsigned long flagv;
+                               
+                               /* Sniff events */
+                               if (aifcmd->command == cpu_to_le32(AifCmdEventNotify))
+                                       aac_handle_aif(dev, fib);
+
+                               time_now = jiffies/HZ;
+
+                               spin_lock_irqsave(&dev->fib_lock, flagv);
+                               entry = dev->fib_list.next;
                                /*
                                 * For each Context that is on the 
                                 * fibctxList, make a copy of the
                                 * fib, and then set the event to wake up the
                                 * thread that is waiting for it.
                                 */
-               while (entry != &dev->fib_list) {
-                   /*
-                    * Extract the fibctx
-                    */
-                   fibctx = list_entry(entry, struct aac_fib_context, next);
-                   /*
-                    * Check if the queue is getting
-                    * backlogged
-                    */
-                   if (fibctx->count > 20)
-                   {
-                       time_last = fibctx->jiffies;
-                       /*
-                        * Has it been > 2 minutes 
-                        * since the last read off
-                        * the queue?
-                        */
-                       if ((time_now - time_last) > 120) {
-                           entry = entry->next;
-                           aac_close_fib_context(dev, fibctx);
-                           continue;
-                       }
-                   }
-                   /*
-                    * Warning: no sleep allowed while
-                    * holding spinlock
-                    */
-                   newfib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC);
-                   if (newfib) {
-                       /*
-                        * Make the copy of the FIB
-                        */
-                       memcpy(newfib, fib, sizeof(struct hw_fib));
-                       /*
-                        * Put the FIB onto the
-                        * fibctx's fibs
-                        */
-                       list_add_tail(&newfib->header.FibLinks, &fibctx->fibs);
-                       fibctx->count++;
+                               while (entry != &dev->fib_list) {
+                                       /*
+                                        * Extract the fibctx
+                                        */
+                                       fibctx = list_entry(entry, struct aac_fib_context, next);
+                                       /*
+                                        * Check if the queue is getting
+                                        * backlogged
+                                        */
+                                       if (fibctx->count > 20)
+                                       {
+                                               /*
+                                                * It's *not* jiffies folks,
+                                                * but jiffies / HZ, so do not
+                                                * panic ...
+                                                */
+                                               time_last = fibctx->jiffies;
+                                               /*
+                                                * Has it been > 2 minutes 
+                                                * since the last read off
+                                                * the queue?
+                                                */
+                                               if ((time_now - time_last) > 120) {
+                                                       entry = entry->next;
+                                                       aac_close_fib_context(dev, fibctx);
+                                                       continue;
+                                               }
+                                       }
+                                       /*
+                                        * Warning: no sleep allowed while
+                                        * holding spinlock
+                                        */
+                                       hw_newfib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC);
+                                       newfib = kmalloc(sizeof(struct fib), GFP_ATOMIC);
+                                       if (newfib && hw_newfib) {
+                                               /*
+                                                * Make the copy of the FIB
+                                                * FIXME: check if we need to fix other fields up
+                                                */
+                                               memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
+                                               memcpy(newfib, fib, sizeof(struct fib));
+                                               newfib->hw_fib = hw_newfib;
+                                               /*
+                                                * Put the FIB onto the
+                                                * fibctx's fibs
+                                                */
+                                               list_add_tail(&newfib->fiblink, &fibctx->fib_list);
+                                               fibctx->count++;
 #if 0
-                       /* 
-                        * Set the event to wake up the
-                        * thread that will waiting.
-                        */
-                       up(&fibctx->wait_sem);
+                                               /* 
+                                                * Set the event to wake up the
+                                                * thread that will be waiting.
+                                                */
+                                               up(&fibctx->wait_sem);
 #endif
-                   } else {
-                       printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
-                   }
-                   entry = entry->next;
-               }
+                                       } else {
+                                               printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
+                                               if(newfib)
+                                                       kfree(newfib);
+                                               if(hw_newfib)
+                                                       kfree(hw_newfib);
+                                       }
+                                       entry = entry->next;
+                               }
                                /*
                                 *      Set the status of this FIB
                                 */
-               *(u32 *)fib->data = cpu_to_le32(ST_OK);
-               fib_adapter_complete(&fibptr, sizeof(u32));
-               spin_unlock_irqrestore(&dev->fib_lock, flagv);
-           }
-           spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
-       }
-       /*
-        *      There are no more AIF's
-        */
-       spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
+                               *(u32 *)hw_fib->data = cpu_to_le32(ST_OK);
+                               fib_adapter_complete(fib, sizeof(u32));
+                               spin_unlock_irqrestore(&dev->fib_lock, flagv);
+                       }
+                       spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
+                       kfree(fib);
+               }
+               /*
+                *      There are no more AIF's
+                */
+               spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
 #if 0
-       schedule();
+               schedule();
 
-       if(signal_pending(current))
-           break;
-       set_current_state(TASK_INTERRUPTIBLE);
+               if(signal_pending(current))
+                       break;
+               set_current_state(TASK_INTERRUPTIBLE);
 #endif
-
-    }
-    
+       }
 #if 0
-    remove_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
-    dev->aif_thread = 0;
-    complete_and_exit(&dev->aif_completion, 0);
+       remove_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
+       dev->aif_thread = 0;
+       complete_and_exit(&dev->aif_completion, 0);
 #else
-    mdelay(50); 
-    dev->aif_thread = 0;
-
+       mdelay(50); 
+       dev->aif_thread = 0;
 #endif
-    return(0);
 }
index c9b4dfe123f40b2f33a26812afa51ad67c69b341..fab58f32dedee09bf99e68b2887d08e272c8c47d 100644 (file)
  *
  */
 
-#include <xeno/config.h>
-/* #include <xeno/kernel.h> */
-#include <xeno/init.h>
-#include <xeno/types.h>
-#include <xeno/sched.h>
-#include <xeno/pci.h>
-/*  #include <xeno/spinlock.h> */
-/*  #include <xeno/slab.h> */
-/*  #include <xeno/completion.h> */
-#include <xeno/blk.h>
-/*  #include <asm/semaphore.h> */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+/*#include <linux/completion.h>*/
+#include <linux/blk.h>
+/*#include <asm/semaphore.h>*/
 #include "scsi.h"
 #include "hosts.h"
 
@@ -74,12 +74,14 @@ unsigned int aac_response_normal(struct aac_queue * q)
         */
        while(aac_consumer_get(dev, q, &entry))
        {
-               int fast;
+               u32 fast ;
+               fast = (entry->addr & cpu_to_le32(0x01));
+//             fib = &dev->fibs[(entry->addr >> 1)];
+//             hwfib = fib->hw_fib;
+               hwfib = bus_to_virt(le32_to_cpu(entry->addr & cpu_to_le32(~0x01)));
+               fib = &dev->fibs[hwfib->header.SenderData];
 
-               fast = (int) (entry->addr & 0x01);
-               hwfib = addr2fib(entry->addr & ~0x01);
                aac_consumer_free(dev, q, HostNormRespQueue);
-               fib = &dev->fibs[hwfib->header.SenderData];
                /*
                 *      Remove this fib from the Outstanding I/O queue.
                 *      But only if it has not already been timed out.
@@ -173,32 +175,53 @@ unsigned int aac_command_normal(struct aac_queue *q)
         *      up the waiters until there are no more QEs. We then return
         *      back to the system.
         */
+       dprintk((KERN_INFO
+         "dev=%p, dev->comm_phys=%x, dev->comm_addr=%p, dev->comm_size=%u\n",
+         dev, (u32)dev->comm_phys, dev->comm_addr, (unsigned)dev->comm_size));
+
        while(aac_consumer_get(dev, q, &entry))
        {
-               struct hw_fib * fib;
-               fib = addr2fib(entry->addr);
+               struct fib fibctx;
+               struct fib *fib = &fibctx;
+               u32 hw_fib_pa = le32_to_cpu(entry->addr & cpu_to_le32(~0x01));
+               struct hw_fib * hw_fib_va = ((dev->comm_phys <= hw_fib_pa)
+                && (hw_fib_pa < (dev->comm_phys + dev->comm_size)))
+                 ? dev->comm_addr + (hw_fib_pa - dev->comm_phys)
+                 : /* inconceivable */ bus_to_virt(hw_fib_pa);
+               dprintk((KERN_INFO "hw_fib_pa=%x hw_fib_va=%p\n", hw_fib_pa, hw_fib_va));
 
-               if (dev->aif_thread) {
-                       list_add_tail(&fib->header.FibLinks, &q->cmdq);
+               /*
+                *      Allocate a FIB at all costs. For non queued stuff
+                *      we can just use the stack so we are happy. We need
+                *      a fib object in order to manage the linked lists
+                */
+               if (dev->aif_thread)
+                       if((fib = kmalloc(sizeof(struct fib), GFP_ATOMIC))==NULL)
+                               fib = &fibctx;
+                       
+               memset(fib, 0, sizeof(struct fib));
+               INIT_LIST_HEAD(&fib->fiblink);
+               fib->type = FSAFS_NTC_FIB_CONTEXT;
+               fib->size = sizeof(struct fib);
+               fib->hw_fib = hw_fib_va;
+               fib->data = hw_fib_va->data;
+               fib->dev = dev;
+               
+               if (dev->aif_thread && fib != &fibctx)
+               {               
+                       list_add_tail(&fib->fiblink, &q->cmdq);
                        aac_consumer_free(dev, q, HostNormCmdQueue);
 #if 0
                        wake_up_interruptible(&q->cmdready);
 #endif
                } else {
-                       struct fib fibctx;
                        aac_consumer_free(dev, q, HostNormCmdQueue);
                        spin_unlock_irqrestore(q->lock, flags);
-                       memset(&fibctx, 0, sizeof(struct fib));
-                       fibctx.type = FSAFS_NTC_FIB_CONTEXT;
-                       fibctx.size = sizeof(struct fib);
-                       fibctx.fib = fib;
-                       fibctx.data = fib->data;
-                       fibctx.dev = dev;
                        /*
                         *      Set the status of this FIB
                         */
-                       *(u32 *)fib->data = cpu_to_le32(ST_OK);
-                       fib_adapter_complete(&fibctx, sizeof(u32));
+                       *(u32 *)hw_fib_va->data = cpu_to_le32(ST_OK);
+                       fib_adapter_complete(fib, sizeof(u32));
                        spin_lock_irqsave(q->lock, flags);
                }               
        }
index 3e3d60f8acae89f2046d335cb22ade4199e3cc93..f68872d7f299375b4ea606420ea99d2b4ccac23b 100644 (file)
  *     
  */
 
-#define AAC_DRIVER_VERSION             "0.9.9ac6-TEST"
-#define AAC_DRIVER_BUILD_DATE          __DATE__
-
-#include <xeno/module.h>
-#include <xeno/config.h>
-#include <xeno/kernel.h>
-#include <xeno/init.h>
-#include <xeno/types.h>
-#include <xeno/sched.h>
-#include <xeno/pci.h>
-#include <xeno/spinlock.h>
-/*  #include <xeno/slab.h> */
-/*  #include <xeno/completion.h> */
-/*  #include <asm/semaphore.h> */
-#include <xeno/blk.h>
+#define AAC_DRIVER_VERSION             "1.1.2"
+#define AAC_DRIVER_BUILD_DATE          __DATE__ " " __TIME__
+
+#include <linux/module.h>
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+/*#include <linux/completion.h>*/
+/*#include <asm/semaphore.h>*/
+#include <linux/blk.h>
 #include "scsi.h"
 #include "hosts.h"
 
 #define AAC_DRIVERNAME "aacraid"
 
 MODULE_AUTHOR("Red Hat Inc and Adaptec");
-MODULE_DESCRIPTION("Supports Dell PERC2, 2/Si, 3/Si, 3/Di, PERC 320/DC, Adaptec 2120S, 2200S, 5400S, and HP NetRAID-4M devices. http://domsch.com/xeno/ or http://linux.adaptec.com");
+MODULE_DESCRIPTION("Supports Dell PERC2, 2/Si, 3/Si, 3/Di, Adaptec Advanced Raid Products, and HP NetRAID-4M devices. http://domsch.com/linux/ or http://linux.adaptec.com");
 MODULE_LICENSE("GPL");
-MODULE_PARM(nondasd, "i");
-MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices. 0=off, 1=on");
+MODULE_PARM(paemode, "i");
+MODULE_PARM_DESC(paemode, "Control whether dma addressing is using PAE. 0=off, 1=on");
 
-static int nondasd=-1;
+#if 0
+static int paemode = -1;
+#endif
 
 struct aac_dev *aac_devices[MAXIMUM_NUM_ADAPTERS];
 
@@ -81,25 +83,41 @@ static int aac_cfg_major = -1;
  */
  
 static struct aac_driver_ident aac_drivers[] = {
-       { 0x1028, 0x0001, 0x1028, 0x0001, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 }, /* PERC 2/Si */
-       { 0x1028, 0x0002, 0x1028, 0x0002, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 }, /* PERC 3/Di */
-       { 0x1028, 0x0003, 0x1028, 0x0003, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 }, /* PERC 3/Si */
-       { 0x1028, 0x0004, 0x1028, 0x00d0, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 }, /* PERC 3/Si */
-       { 0x1028, 0x0002, 0x1028, 0x00d1, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 }, /* PERC 3/Di */
-       { 0x1028, 0x0002, 0x1028, 0x00d9, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 }, /* PERC 3/Di */
-       { 0x1028, 0x000a, 0x1028, 0x0106, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 }, /* PERC 3/Di */
-       { 0x1028, 0x000a, 0x1028, 0x011b, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 }, /* PERC 3/Di */
-       { 0x1028, 0x000a, 0x1028, 0x0121, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 }, /* PERC 3/Di */
-       { 0x9005, 0x0283, 0x9005, 0x0283, aac_rx_init, "aacraid",  "ADAPTEC ", "catapult        ", 2 }, /* catapult*/
-       { 0x9005, 0x0284, 0x9005, 0x0284, aac_rx_init, "aacraid",  "ADAPTEC ", "tomcat          ", 2 }, /* tomcat*/
-       { 0x9005, 0x0285, 0x9005, 0x0286, aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 2120S   ", 1 }, /* Adaptec 2120S (Crusader)*/
-       { 0x9005, 0x0285, 0x9005, 0x0285, aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 2200S   ", 2 }, /* Adaptec 2200S (Vulcan)*/
-       { 0x9005, 0x0285, 0x9005, 0x0287, aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 2200S   ", 2 }, /* Adaptec 2200S (Vulcan-2m)*/
-       { 0x9005, 0x0285, 0x1028, 0x0287, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 }, /* Dell PERC 320/DC */
-       { 0x1011, 0x0046, 0x9005, 0x0365, aac_sa_init, "aacraid",  "ADAPTEC ", "Adaptec 5400S   ", 4 }, /* Adaptec 5400S (Mustang)*/
-       { 0x1011, 0x0046, 0x9005, 0x0364, aac_sa_init, "aacraid",  "ADAPTEC ", "AAC-364         ", 4 }, /* Adaptec 5400S (Mustang)*/
-       { 0x1011, 0x0046, 0x9005, 0x1364, aac_sa_init, "percraid", "DELL    ", "PERCRAID        ", 4 }, /* Dell PERC2 "Quad Channel" */
-       { 0x1011, 0x0046, 0x103c, 0x10c2, aac_sa_init, "hpnraid",  "HP      ", "NetRAID-4M      ", 4 }  /* HP NetRAID-4M */
+       { 0x1028, 0x0001, 0x1028, 0x0001, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 },                 /* PERC 2/Si (Iguana/PERC2Si) */
+       { 0x1028, 0x0002, 0x1028, 0x0002, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 },                 /* PERC 3/Di (Opal/PERC3Di) */
+       { 0x1028, 0x0003, 0x1028, 0x0003, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 },                 /* PERC 3/Si (SlimFast/PERC3Si */
+       { 0x1028, 0x0004, 0x1028, 0x00d0, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 },                 /* PERC 3/Di (Iguana FlipChip/PERC3DiF */
+       { 0x1028, 0x0002, 0x1028, 0x00d1, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 },                 /* PERC 3/Di (Viper/PERC3DiV) */
+       { 0x1028, 0x0002, 0x1028, 0x00d9, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 },                 /* PERC 3/Di (Lexus/PERC3DiL) */
+       { 0x1028, 0x000a, 0x1028, 0x0106, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 },                 /* PERC 3/Di (Jaguar/PERC3DiJ) */
+       { 0x1028, 0x000a, 0x1028, 0x011b, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 },                 /* PERC 3/Di (Dagger/PERC3DiD) */
+       { 0x1028, 0x000a, 0x1028, 0x0121, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 },                 /* PERC 3/Di (Boxster/PERC3DiB) */
+       { 0x9005, 0x0283, 0x9005, 0x0283, aac_rx_init, "aacraid",  "ADAPTEC ", "catapult        ", 2 },                 /* catapult */
+       { 0x9005, 0x0284, 0x9005, 0x0284, aac_rx_init, "aacraid",  "ADAPTEC ", "tomcat          ", 2 },                 /* tomcat */
+       { 0x9005, 0x0285, 0x9005, 0x0286, aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 2120S   ", 1, AAC_QUIRK_31BIT },/* Adaptec 2120S (Crusader) */
+       { 0x9005, 0x0285, 0x9005, 0x0285, aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 2200S   ", 2, AAC_QUIRK_31BIT },/* Adaptec 2200S (Vulcan) */
+       { 0x9005, 0x0285, 0x9005, 0x0287, aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 2200S   ", 2, AAC_QUIRK_31BIT },/* Adaptec 2200S (Vulcan-2m) */
+       { 0x9005, 0x0285, 0x17aa, 0x0286, aac_rx_init, "aacraid",  "Legend  ", "Legend S220     ", 1 },                 /* Legend S220 (Legend Crusader) */
+       { 0x9005, 0x0285, 0x17aa, 0x0287, aac_rx_init, "aacraid",  "Legend  ", "Legend S230     ", 2 },                 /* Legend S230 (Legend Vulcan) */
+
+       { 0x9005, 0x0285, 0x9005, 0x0288, aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 3230S   ", 2 },                 /* Adaptec 3230S (Harrier) */
+       { 0x9005, 0x0285, 0x9005, 0x0289, aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 3240S   ", 2 },                 /* Adaptec 3240S (Tornado) */
+       { 0x9005, 0x0285, 0x9005, 0x028a, aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-2020S PCI-X ", 2 },                 /* ASR-2020S PCI-X ZCR (Skyhawk) */
+       { 0x9005, 0x0285, 0x9005, 0x028b, aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-2020S PCI-X ", 2 },                 /* ASR-2020S SO-DIMM PCI-X ZCR (Terminator) */
+       { 0x9005, 0x0285, 0x9005, 0x0290, aac_rx_init, "aacraid",  "ADAPTEC ", "AAR-2410SA SATA ", 2 },                 /* AAR-2410SA PCI SATA 4ch (Jaguar II) */
+       { 0x9005, 0x0285, 0x1028, 0x0291, aac_rx_init, "aacraid",  "DELL    ", "CERC SATA RAID 2 ", 2 },                /* CERC SATA RAID 2 PCI SATA 8ch (DellCorsair) */
+       { 0x9005, 0x0285, 0x9005, 0x0292, aac_rx_init, "aacraid",  "ADAPTEC ", "AAR-2810SA SATA ", 2 },                 /* AAR-2810SA PCI SATA 8ch (Corsair-8) */
+       { 0x9005, 0x0285, 0x9005, 0x0293, aac_rx_init, "aacraid",  "ADAPTEC ", "AAR-21610SA SATA ", 2 },                /* AAR-21610SA PCI SATA 16ch (Corsair-16) */
+       { 0x9005, 0x0285, 0x9005, 0x0294, aac_rx_init, "aacraid",  "ADAPTEC ", "SO-DIMM SATA ZCR ", 2 },                /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */
+       /* ServeRAID */
+/*     { 0x9005, 0x0250, 0x1014, 0x0279, aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec         ", 2 }, */ /*  (Marco) */
+/*     { 0x9005, 0x0250, 0x1014, 0x028c, aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec         ", 2 }, */ /* (Sebring)*/
+
+       { 0x9005, 0x0285, 0x1028, 0x0287, aac_rx_init, "percraid", "DELL    ", "PERC 320/DC     ", 2 },                 /* Perc 320/DC*/
+       { 0x1011, 0x0046, 0x9005, 0x0365, aac_sa_init, "aacraid",  "ADAPTEC ", "Adaptec 5400S   ", 4 },                 /* Adaptec 5400S (Mustang)*/
+       { 0x1011, 0x0046, 0x9005, 0x0364, aac_sa_init, "aacraid",  "ADAPTEC ", "AAC-364         ", 4 },                 /* Adaptec 5400S (Mustang)*/
+       { 0x1011, 0x0046, 0x9005, 0x1364, aac_sa_init, "percraid", "DELL    ", "PERCRAID        ", 4 },                 /* Dell PERC2 "Quad Channel" */
+       { 0x1011, 0x0046, 0x103c, 0x10c2, aac_sa_init, "hpnraid",  "HP      ", "NetRAID         ", 4 }                  /* HP NetRAID-4M */
 };
 
 #define NUM_AACTYPES   (sizeof(aac_drivers) / sizeof(struct aac_driver_ident))
@@ -111,7 +129,7 @@ static int aac_cfg_open(struct inode * inode, struct file * file);
 static int aac_cfg_release(struct inode * inode,struct file * file);
 
 static struct file_operations aac_cfg_fops = {
-/*     owner: THIS_MODULE, */
+       owner: THIS_MODULE,
        ioctl: aac_cfg_ioctl,
        open: aac_cfg_open,
        release: aac_cfg_release
@@ -151,194 +169,156 @@ static void aac_queuedepth(struct Scsi_Host *, Scsi_Device *);
  *     scsi_malloc/scsi_free must not be called.
  *
  */
 static int aac_detect(Scsi_Host_Template *template)
 {
-    int index;
-    int container;
-    u16 vendor_id, device_id;
-    struct Scsi_Host *host_ptr;
-    struct pci_dev *dev = NULL;
-    struct aac_dev *aac;
-    struct fsa_scsi_hba *fsa_dev_ptr;
-    char *name = NULL;
+       int index;
+       int container;
+       u16 vendor_id, device_id;
+       struct Scsi_Host *host_ptr;
+       struct pci_dev *dev = NULL;
+       struct aac_dev *aac;
+       struct fsa_scsi_hba *fsa_dev_ptr;
+       char *name = NULL;
        
-    printk(KERN_INFO "Red Hat/Adaptec aacraid driver, %s\n", 
-          AAC_DRIVER_BUILD_DATE);
-
-
-    /* 
-    ** XXX SMH: we need to take interrupts during detect, but the SCSI 
-    ** layer is holding this lock with interrupts disabled. I don't 
-    ** know how this works on vanilla linux (we 'down' on a semaphone 
-    ** at one point during the process -- how do we wake?) 
-    */
-    spin_unlock_irq(&io_request_lock);
-
-
-    /* setting up the proc directory structure */
-    template->proc_name = "aacraid";
-
-    for( index = 0; index != num_aacdrivers; index++ )
-    {
-       device_id = aac_drivers[index].device;
-       vendor_id = aac_drivers[index].vendor;
-       name = aac_drivers[index].name;
-       dprintk((KERN_DEBUG "Checking %s %x/%x/%x/%x.\n", 
-                name, vendor_id, device_id,
-                aac_drivers[index].subsystem_vendor,
-                aac_drivers[index].subsystem_device));
-
-       dev = NULL;
-       while((dev = pci_find_device(vendor_id, device_id, dev))) {
-           if (pci_enable_device(dev))
-               continue;
-           pci_set_master(dev);
-           pci_set_dma_mask(dev, 0xFFFFFFFFULL);
-
-           if((dev->subsystem_vendor != aac_drivers[index].subsystem_vendor) || 
-              (dev->subsystem_device != aac_drivers[index].subsystem_device))
-               continue;
-
-           dprintk((KERN_DEBUG "%s device detected.\n", name));
-           dprintk((KERN_DEBUG "%x/%x/%x/%x.\n", vendor_id, device_id, 
-                    aac_drivers[index].subsystem_vendor, 
-                    aac_drivers[index].subsystem_device));
-           /* Increment the host adapter count */
-           aac_count++;
-           /*
-            * scsi_register() allocates memory for a Scsi_Hosts
-            * structure and links it into the linked list of host
-            * adapters. This linked list contains the data for all
-            * possible <supported> scsi hosts.  This is similar to
-            * the Scsi_Host_Template, except that we have one entry
-            * for each actual physical host adapter on the system,
-            * stored as a linked list. If there are two AAC boards,
-            * then we will need to make two Scsi_Host entries, but
-            * there will be only one Scsi_Host_Template entry. The
-            * second argument to scsi_register() specifies the size
-            * of the extra memory we want to hold any device specific
-            * information.  */
-           host_ptr = scsi_register( template, sizeof(struct aac_dev) );
-           /* 
-            * These three parameters can be used to allow for wide SCSI 
-            * and for host adapters that support multiple buses.
-            */
-           host_ptr->max_id = 17;
-           host_ptr->max_lun = 8;
-           host_ptr->max_channel = 1;
-           host_ptr->irq = dev->irq;           /* Adapter IRQ number */
-           /* host_ptr->base = ( char * )(dev->resource[0].start & ~0xff); */
-           host_ptr->base = dev->resource[0].start;
-           scsi_set_pci_device(host_ptr, dev);
-           dprintk((KERN_DEBUG "Device base address = 0x%lx [0x%lx].\n", 
-                    host_ptr->base, dev->resource[0].start));
-           dprintk((KERN_DEBUG "Device irq = 0x%x.\n", dev->irq));
-           /*
-            * The unique_id field is a unique identifier that must
-            * be assigned so that we have some way of identifying
-            * each host adapter properly and uniquely. For hosts 
-            * that do not support more than one card in the
-            * system, this does not need to be set. It is
-            * initialized to zero in scsi_register(). This is the 
-            * value returned as aac->id.
-            */
-           host_ptr->unique_id = aac_count - 1;
-           /*
-            *  This function is called after the device list has
-            *  been built to find the tagged queueing depth 
-            *  supported for each device.
-            */
-           host_ptr->select_queue_depths = aac_queuedepth;
-           aac = (struct aac_dev *)host_ptr->hostdata;
-           /* attach a pointer back to Scsi_Host */
-           aac->scsi_host_ptr = host_ptr;      
-           aac->pdev = dev;
-           aac->cardtype =  index;
-           aac->name = aac->scsi_host_ptr->hostt->name;
-           aac->id = aac->scsi_host_ptr->unique_id;
-           /* Initialize the ordinal number of the device to -1 */
-           fsa_dev_ptr = &(aac->fsa_dev);
-           for( container=0; container < MAXIMUM_NUM_CONTAINERS; container++)
-               fsa_dev_ptr->devno[container] = -1;
-
-           dprintk((KERN_DEBUG "Initializing Hardware...\n"));
-
-           if((*aac_drivers[index].init)(aac , host_ptr->unique_id) != 0)
-           {
-               /* device initialization failed */
-               printk(KERN_WARNING 
-                      "aacraid: device initialization failed.\n");
-               scsi_unregister(host_ptr);
-               aac_count--;
-               continue;
-           } 
-           dprintk((KERN_DEBUG "%s:%d device initialization successful.\n", 
-                    name, host_ptr->unique_id));
-           aac_get_adapter_info(aac);
-
-           dprintk((KERN_DEBUG "%s got adapter info.\n", name));
-
-           if(nondasd != -1) 
-           {
-               /* someone told us how to set this on the cmdline */
-               aac->nondasd_support = (nondasd!=0);
-           }
-           if(aac->nondasd_support != 0){
-               printk(KERN_INFO "%s%d: Non-DASD support enabled\n", 
-                      aac->name, aac->id);
-           }
-           dprintk((KERN_DEBUG "%s:%d options flag %04x.\n", name, 
-                    host_ptr->unique_id, aac->adapter_info.options));
-           if(aac->nondasd_support == 1)
-           {
-               /*
-                * max channel will be the physical
-                * channels plus 1 virtual channel all
-                * containers are on the virtual
-                * channel 0 physical channels are
-                * address by their actual physical
-                * number+1 */
-               host_ptr->max_channel = aac_drivers[index].channels+1;
-           } else {
-               host_ptr->max_channel = 1;
-           }
-           dprintk((KERN_DEBUG "Device has %d logical channels\n", 
-                    host_ptr->max_channel));
-           aac_get_containers(aac);
-           aac_devices[aac_count-1] = aac;
-
-           /*
-            * dmb - we may need to move these 3 parms somewhere else once
-            * we get a fib that can report the actual numbers
-            */
-           host_ptr->max_id = AAC_MAX_TARGET;
-           host_ptr->max_lun = AAC_MAX_LUN;
-                       
-           /*
-            *  If we are PAE capable then our future DMA mappings
-            *  (for read/write commands) are 64bit clean and don't 
-            *  need bouncing. This assumes we do no other 32bit only
-            *  allocations (eg fib table expands) after this point.
-            */
-                        
-           if(aac->pae_support)
-               pci_set_dma_mask(dev, 0xFFFFFFFFFFFFFFFFUL);
-       }
-    }
+       printk(KERN_INFO "Red Hat/Adaptec aacraid driver (%s %s)\n", AAC_DRIVER_VERSION, AAC_DRIVER_BUILD_DATE);
 
-    /* XXX SMH: restore lock and IPL for SCSI layer */
-    spin_lock_irq(&io_request_lock);
+       /* setting up the proc directory structure */
+       template->proc_name = "aacraid";
+       spin_unlock_irq(&io_request_lock);
 
+       for( index = 0; index != num_aacdrivers; index++ )
+       {
+               device_id = aac_drivers[index].device;
+               vendor_id = aac_drivers[index].vendor;
+               name = aac_drivers[index].name;
+               dprintk((KERN_DEBUG "Checking %s %x/%x/%x/%x.\n", 
+                       name, vendor_id, device_id,
+                       aac_drivers[index].subsystem_vendor,
+                       aac_drivers[index].subsystem_device));
+
+               dev = NULL;
+               while((dev = pci_find_device(vendor_id, device_id, dev))) {
+                       if (pci_enable_device(dev))
+                               continue;
+                       pci_set_master(dev);
+                       
+                       if(aac_drivers[index].quirks & AAC_QUIRK_31BIT)
+                               pci_set_dma_mask(dev, 0x7FFFFFFFULL);
+                       else
+                               pci_set_dma_mask(dev, 0xFFFFFFFFULL);
+
+                       if((dev->subsystem_vendor != aac_drivers[index].subsystem_vendor) || 
+                          (dev->subsystem_device != aac_drivers[index].subsystem_device))
+                                       continue;
+
+                       dprintk((KERN_DEBUG "%s device detected.\n", name));
+                       dprintk((KERN_DEBUG "%x/%x/%x/%x.\n", vendor_id, device_id, 
+                               aac_drivers[index].subsystem_vendor, aac_drivers[index].subsystem_device));
+                       /*
+                        * scsi_register() allocates memory for a Scsi_Hosts structure and
+                        * links it into the linked list of host adapters. This linked list
+                        * contains the data for all possible <supported> scsi hosts.
+                        * This is similar to the Scsi_Host_Template, except that we have
+                        * one entry for each actual physical host adapter on the system,
+                        * stored as a linked list. If there are two AAC boards, then we
+                        * will need to make two Scsi_Host entries, but there will be only
+                        * one Scsi_Host_Template entry. The second argument to scsi_register()
+                        * specifies the size of the extra memory we want to hold any device 
+                        * specific information.
+                        */
+                       host_ptr = scsi_register( template, sizeof(struct aac_dev) );
+                       if(host_ptr == NULL)
+                               continue;
+                       /* Increment the host adapter count */
+                       aac_count++;
+                       /* 
+                        * These three parameters can be used to allow for wide SCSI 
+                        * and for host adapters that support multiple buses.
+                        */
+                       host_ptr->irq = dev->irq;               /* Adapter IRQ number */
+                       /* host_ptr->base = ( char * )(dev->resource[0].start & ~0xff); */
+                       host_ptr->base = dev->resource[0].start;
+                       scsi_set_pci_device(host_ptr, dev);
+                       dprintk((KERN_DEBUG "Device base address = 0x%lx [0x%lx].\n", host_ptr->base, dev->resource[0].start));
+                       dprintk((KERN_DEBUG "Device irq = 0x%x.\n", dev->irq));
+                       /*
+                        * The unique_id field is a unique identifier that must
+                        * be assigned so that we have some way of identifying
+                        * each host adapter properly and uniquely. For hosts 
+                        * that do not support more than one card in the
+                        * system, this does not need to be set. It is
+                        * initialized to zero in scsi_register(). This is the 
+                        * value returned as aac->id.
+                        */
+                       host_ptr->unique_id = aac_count - 1;
+                       /*
+                        *      This function is called after the device list has
+                        *      been built to find the tagged queueing depth 
+                        *      supported for each device.
+                        */
+                       host_ptr->select_queue_depths = aac_queuedepth;
+                       aac = (struct aac_dev *)host_ptr->hostdata;
+                       /* attach a pointer back to Scsi_Host */
+                       aac->scsi_host_ptr = host_ptr;  
+                       aac->pdev = dev;
+                       aac->name = aac->scsi_host_ptr->hostt->name;
+                       aac->id = aac->scsi_host_ptr->unique_id;
+                       aac->cardtype =  index;
+
+                       aac->fibs = (struct fib*) kmalloc(sizeof(struct fib)*AAC_NUM_FIB, GFP_KERNEL);
+                       spin_lock_init(&aac->fib_lock);
+
+                       /* Initialize the ordinal number of the device to -1 */
+                       fsa_dev_ptr = &(aac->fsa_dev);
+                       for( container = 0; container < MAXIMUM_NUM_CONTAINERS; container++ )
+                               fsa_dev_ptr->devno[container] = -1;
+
+                       dprintk((KERN_DEBUG "Initializing Hardware...\n"));
+                       if((*aac_drivers[index].init)(aac , host_ptr->unique_id) != 0)
+                       {
+                               /* device initialization failed */
+                               printk(KERN_WARNING "aacraid: device initialization failed.\n");
+                               scsi_unregister(host_ptr);
+                               aac_count--;
+                               continue;
+                       } 
+                       dprintk((KERN_DEBUG "%s:%d device initialization successful.\n", name, host_ptr->unique_id));
+                       aac_get_adapter_info(aac);
+                       if(aac->nondasd_support == 1)
+                       {
+                               /*
+                                * max channel will be the physical channels plus 1 virtual channel 
+                                * all containers are on the virtual channel 0
+                                * physical channels are address by their actual physical number+1
+                                */
+                               host_ptr->max_channel = aac_drivers[index].channels+1;
+                       } else {
+                               host_ptr->max_channel = 1;
+                       }
+                       dprintk((KERN_DEBUG "Device has %d logical channels\n", host_ptr->max_channel));
+                       aac_get_containers(aac);
+                       aac_devices[aac_count-1] = aac;
+
+                       /*
+                        * dmb - we may need to move the setting of these parms somewhere else once
+                        * we get a fib that can report the actual numbers
+                        */
+                       host_ptr->max_id = AAC_MAX_TARGET;
+                       host_ptr->max_lun = AAC_MAX_LUN;
+               }
+       }
 
 #if 0
-    if( aac_count ){
-       if((aac_cfg_major = register_chrdev( 0, "aac", &aac_cfg_fops))<0)
-           printk(KERN_WARNING "aacraid: unable to register 'aac' device.\n");
-    }
+       if( aac_count ){
+               if((aac_cfg_major = register_chrdev( 0, "aac", &aac_cfg_fops))<0)
+                       printk(KERN_WARNING "aacraid: unable to register \"aac\" device.\n");
+       }
 #endif
+       spin_lock_irq(&io_request_lock);
 
-    template->present = aac_count; /* # of cards of this type found */
-    return aac_count;
+       template->present = aac_count; /* # of cards of this type found */
+       return aac_count;
 }
 
 /**
@@ -353,38 +333,37 @@ static int aac_detect(Scsi_Host_Template *template)
 
 static int aac_release(struct Scsi_Host *host_ptr)
 {
-    struct aac_dev *dev;
-    dprintk((KERN_DEBUG "aac_release.\n"));
-    dev = (struct aac_dev *)host_ptr->hostdata;
-    
+       struct aac_dev *dev;
+       dprintk((KERN_DEBUG "aac_release.\n"));
+       dev = (struct aac_dev *)host_ptr->hostdata;
 #if 0
-    /*
-     * kill any threads we started
-     */
-    kill_proc(dev->thread_pid, SIGKILL, 0);
-    wait_for_completion(&dev->aif_completion);
+       /*
+        *      kill any threads we started
+        */
+       kill_proc(dev->thread_pid, SIGKILL, 0);
+       wait_for_completion(&dev->aif_completion);
 #endif
-    /*
-     * Call the comm layer to detach from this adapter
-     */
-    aac_detach(dev);
-    /* Check free orderings... */
-    /* remove interrupt binding */
-    free_irq(host_ptr->irq, dev);
-    iounmap((void * )dev->regs.sa);
-    /* unregister adapter */
-    scsi_unregister(host_ptr);
-    /*
-     * FIXME: This assumes no hot plugging is going on...
-     */
-    if( aac_cfg_major >= 0 )
-    {
+       /*
+        *      Call the comm layer to detach from this adapter
+        */
+       aac_detach(dev);
+       /* Check free orderings... */
+       /* remove interrupt binding */
+       free_irq(host_ptr->irq, dev);
+       iounmap((void * )dev->regs.sa);
+       /* unregister adapter */
+       scsi_unregister(host_ptr);
+       /*
+        *      FIXME: This assumes no hot plugging is going on...
+        */
+       if( aac_cfg_major >= 0 )
+       {
 #if 0
-       unregister_chrdev(aac_cfg_major, "aac");
+               unregister_chrdev(aac_cfg_major, "aac");
 #endif
-       aac_cfg_major = -1;
-    }
-    return 0;
+               aac_cfg_major = -1;
+       }
+       return 0;
 }
 
 /**
@@ -397,16 +376,16 @@ static int aac_release(struct Scsi_Host *host_ptr)
 
 static int aac_queuecommand(Scsi_Cmnd *scsi_cmnd_ptr, void (*complete)(Scsi_Cmnd *))
 {
-    int ret;
-    
-    scsi_cmnd_ptr->scsi_done = complete;
-    /*
-     * aac_scsi_cmd() handles command processing, setting the 
-     * result code and calling completion routine. 
-     */
-    if((ret = aac_scsi_cmd(scsi_cmnd_ptr)) != 0)
-       dprintk((KERN_DEBUG "aac_scsi_cmd failed.\n"));
-    return ret;
+       int ret;
+
+       scsi_cmnd_ptr->scsi_done = complete;
+       /*
+        *      aac_scsi_cmd() handles command processing, setting the 
+        *      result code and calling completion routine. 
+        */
+       if((ret = aac_scsi_cmd(scsi_cmnd_ptr)) != 0)
+               dprintk((KERN_DEBUG "aac_scsi_cmd failed.\n"));
+       return ret;
 } 
 
 /**
@@ -418,8 +397,8 @@ static int aac_queuecommand(Scsi_Cmnd *scsi_cmnd_ptr, void (*complete)(Scsi_Cmnd
 
 const char *aac_driverinfo(struct Scsi_Host *host_ptr)
 {
-    struct aac_dev *dev = (struct aac_dev *)host_ptr->hostdata;
-    return aac_drivers[dev->cardtype].name;
+       struct aac_dev *dev = (struct aac_dev *)host_ptr->hostdata;
+       return aac_drivers[dev->cardtype].name;
 }
 
 /**
@@ -456,111 +435,110 @@ struct aac_driver_ident* aac_get_driver_ident(int devtype)
  
 static int aac_biosparm(Scsi_Disk *disk, kdev_t dev, int *geom)
 {
-    struct diskparm *param = (struct diskparm *)geom;
-    /*struct buffer_head * buf;*/
-    
-    dprintk((KERN_DEBUG "aac_biosparm.\n"));
-
-    /*
-     * Assuming extended translation is enabled - #REVISIT#
-     */
-    if( disk->capacity >= 2 * 1024 * 1024 ) /* 1 GB in 512 byte sectors */
-    {
-       if( disk->capacity >= 4 * 1024 * 1024 ) /* 2 GB in 512 byte sectors */
+       struct diskparm *param = (struct diskparm *)geom;
+#if 0
+       struct buffer_head * buf;
+#endif
+
+       dprintk((KERN_DEBUG "aac_biosparm.\n"));
+
+       /*
+        *      Assuming extended translation is enabled - #REVISIT#
+        */
+       if( disk->capacity >= 2 * 1024 * 1024 ) /* 1 GB in 512 byte sectors */
        {
-           param->heads = 255;
-           param->sectors = 63;
+               if( disk->capacity >= 4 * 1024 * 1024 ) /* 2 GB in 512 byte sectors */
+               {
+                       param->heads = 255;
+                       param->sectors = 63;
+               }
+               else
+               {
+                       param->heads = 128;
+                       param->sectors = 32;
+               }
        }
        else
        {
-           param->heads = 128;
-           param->sectors = 32;
-       }
-    }
-    else
-    {
-       param->heads = 64;
-       param->sectors = 32;
-    }
-    
-    param->cylinders = disk->capacity/(param->heads * param->sectors);
-       
-#if 0
-    /*
-     * Read the first 1024 bytes from the disk device
-     */
-    
-    buf = bread(MKDEV(MAJOR(dev), MINOR(dev)&~0xf), 0, block_size(dev));
-    if(buf == NULL)
-       return 0;
-    /* 
-     * If the boot sector partition table is valid, search for a partition 
-     * table entry whose end_head matches one of the standard geometry 
-     * translations ( 64/32, 128/32, 255/63 ).
-     */
-
-        
-    if(*(unsigned short *)(buf->b_data + 0x1fe) == cpu_to_le16(0xaa55))
-    {
-       struct partition *first = (struct partition * )(buf->b_data + 0x1be);
-       struct partition *entry = first;
-       int saved_cylinders = param->cylinders;
-       int num;
-       unsigned char end_head, end_sec;
-       
-       for(num = 0; num < 4; num++)
-       {
-           end_head = entry->end_head;
-           end_sec = entry->end_sector & 0x3f;
-           
-           if(end_head == 63)
-           {
                param->heads = 64;
                param->sectors = 32;
-               break;
-           }
-           else if(end_head == 127)
-           {
-               param->heads = 128;
-               param->sectors = 32;
-               break;
-           }
-           else if(end_head == 254) 
-           {
-               param->heads = 255;
-               param->sectors = 63;
-               break;
-           }
-           entry++;
-       }
-       
-       if(num == 4)
-       {
-           end_head = first->end_head;
-           end_sec = first->end_sector & 0x3f;
        }
-       
-       param->cylinders = disk->capacity / (param->heads * param->sectors);
-       
-       if(num < 4 && end_sec == param->sectors)
-       {
-           if(param->cylinders != saved_cylinders)
-               dprintk((KERN_DEBUG "Adopting geometry: heads=%d, "
-                        "sectors=%d from partition table %d.\n",
-                        param->heads, param->sectors, num));
-       }
-       else if(end_head > 0 || end_sec > 0)
+
+       param->cylinders = disk->capacity/(param->heads * param->sectors);
+
+#if 0
+       /*
+        *      Read the first 1024 bytes from the disk device
+        */
+
+       buf = bread(MKDEV(MAJOR(dev), MINOR(dev)&~0xf), 0, block_size(dev));
+       if(buf == NULL)
+               return 0;
+       /* 
+        *      If the boot sector partition table is valid, search for a partition 
+        *      table entry whose end_head matches one of the standard geometry 
+        *      translations ( 64/32, 128/32, 255/63 ).
+        */
+        
+       if(*(unsigned short *)(buf->b_data + 0x1fe) == cpu_to_le16(0xaa55))
        {
-           dprintk((KERN_DEBUG "Strange geometry: heads=%d, "
-                    "sectors=%d in partition table %d.\n",
-                    end_head + 1, end_sec, num));
-           dprintk((KERN_DEBUG "Using geometry: heads=%d, sectors=%d.\n",
-                    param->heads, param->sectors));
+               struct partition *first = (struct partition * )(buf->b_data + 0x1be);
+               struct partition *entry = first;
+               int saved_cylinders = param->cylinders;
+               int num;
+               unsigned char end_head, end_sec;
+
+               for(num = 0; num < 4; num++)
+               {
+                       end_head = entry->end_head;
+                       end_sec = entry->end_sector & 0x3f;
+
+                       if(end_head == 63)
+                       {
+                               param->heads = 64;
+                               param->sectors = 32;
+                               break;
+                       }
+                       else if(end_head == 127)
+                       {
+                               param->heads = 128;
+                               param->sectors = 32;
+                               break;
+                       }
+                       else if(end_head == 254) 
+                       {
+                               param->heads = 255;
+                               param->sectors = 63;
+                               break;
+                       }
+                       entry++;
+               }
+
+               if(num == 4)
+               {
+                       end_head = first->end_head;
+                       end_sec = first->end_sector & 0x3f;
+               }
+
+               param->cylinders = disk->capacity / (param->heads * param->sectors);
+
+               if(num < 4 && end_sec == param->sectors)
+               {
+                       if(param->cylinders != saved_cylinders)
+                               dprintk((KERN_DEBUG "Adopting geometry: heads=%d, sectors=%d from partition table %d.\n",
+                                       param->heads, param->sectors, num));
+               }
+               else if(end_head > 0 || end_sec > 0)
+               {
+                       dprintk((KERN_DEBUG "Strange geometry: heads=%d, sectors=%d in partition table %d.\n",
+                               end_head + 1, end_sec, num));
+                       dprintk((KERN_DEBUG "Using geometry: heads=%d, sectors=%d.\n",
+                                       param->heads, param->sectors));
+               }
        }
-    }
-    brelse(buf);
+       brelse(buf);
 #endif
-    return 0;
+       return 0;
 }
 
 /**
@@ -575,20 +553,20 @@ static int aac_biosparm(Scsi_Disk *disk, kdev_t dev, int *geom)
 
 static void aac_queuedepth(struct Scsi_Host * host, Scsi_Device * dev )
 {
-    Scsi_Device * dptr;
-    
-    dprintk((KERN_DEBUG "aac_queuedepth.\n"));
-    dprintk((KERN_DEBUG "Device #   Q Depth   Online\n"));
-    dprintk((KERN_DEBUG "---------------------------\n"));
-    for(dptr = dev; dptr != NULL; dptr = dptr->next)
-    {
-       if(dptr->host == host)
+       Scsi_Device * dptr;
+
+       dprintk((KERN_DEBUG "aac_queuedepth.\n"));
+       dprintk((KERN_DEBUG "Device #   Q Depth   Online\n"));
+       dprintk((KERN_DEBUG "---------------------------\n"));
+       for(dptr = dev; dptr != NULL; dptr = dptr->next)
        {
-           dptr->queue_depth = 10;             
-           dprintk((KERN_DEBUG "  %2d         %d        %d\n", 
-                    dptr->id, dptr->queue_depth, dptr->online));
+               if(dptr->host == host)
+               {
+                       dptr->queue_depth = 10;         
+                       dprintk((KERN_DEBUG "  %2d         %d        %d\n", 
+                               dptr->id, dptr->queue_depth, dptr->online));
+               }
        }
-    }
 }
 
 
@@ -603,7 +581,7 @@ static void aac_queuedepth(struct Scsi_Host * host, Scsi_Device * dev )
  
 static int aac_eh_abort(Scsi_Cmnd *cmd)
 {
-    return FAILED;
+       return FAILED;
 }
 
 /**
@@ -617,7 +595,7 @@ static int aac_eh_abort(Scsi_Cmnd *cmd)
 
 static int aac_eh_device_reset(Scsi_Cmnd *cmd)
 {
-    return FAILED;
+       return FAILED;
 }
 
 /**
@@ -631,7 +609,7 @@ static int aac_eh_device_reset(Scsi_Cmnd *cmd)
 
 static int aac_eh_bus_reset(Scsi_Cmnd* cmd)
 {
-    return FAILED;
+       return FAILED;
 }
 
 /**
@@ -645,8 +623,8 @@ static int aac_eh_bus_reset(Scsi_Cmnd* cmd)
 
 static int aac_eh_reset(Scsi_Cmnd* cmd)
 {
-    printk(KERN_ERR "aacraid: Host adapter reset request. SCSI hang ?\n");
-    return FAILED;
+       printk(KERN_ERR "aacraid: Host adapter reset request. SCSI hang ?\n");
+       return FAILED;
 }
 
 /**
@@ -661,10 +639,10 @@ static int aac_eh_reset(Scsi_Cmnd* cmd)
  
 static int aac_ioctl(Scsi_Device * scsi_dev_ptr, int cmd, void * arg)
 {
-    struct aac_dev *dev;
-    dprintk((KERN_DEBUG "aac_ioctl.\n"));
-    dev = (struct aac_dev *)scsi_dev_ptr->host->hostdata;
-    return aac_do_ioctl(dev, cmd, arg);
+       struct aac_dev *dev;
+       dprintk((KERN_DEBUG "aac_ioctl.\n"));
+       dev = (struct aac_dev *)scsi_dev_ptr->host->hostdata;
+       return aac_do_ioctl(dev, cmd, arg);
 }
 
 /**
@@ -681,10 +659,10 @@ static int aac_ioctl(Scsi_Device * scsi_dev_ptr, int cmd, void * arg)
 
 static int aac_cfg_open(struct inode * inode, struct file * file )
 {
-    unsigned minor_number = MINOR(inode->i_rdev);
-    if(minor_number >= aac_count)
-       return -ENODEV;
-    return 0;
+       unsigned minor_number = MINOR(inode->i_rdev);
+       if(minor_number >= aac_count)
+               return -ENODEV;
+       return 0;
 }
 
 /**
@@ -698,7 +676,7 @@ static int aac_cfg_open(struct inode * inode, struct file * file )
  
 static int aac_cfg_release(struct inode * inode, struct file * file )
 {
-    return 0;
+       return 0;
 }
 
 /**
@@ -717,8 +695,8 @@ static int aac_cfg_release(struct inode * inode, struct file * file )
  
 static int aac_cfg_ioctl(struct inode * inode,  struct file * file, unsigned int cmd, unsigned long arg )
 {
-    struct aac_dev *dev = aac_devices[MINOR(inode->i_rdev)];
-    return aac_do_ioctl(dev, cmd, (void *)arg);
+       struct aac_dev *dev = aac_devices[MINOR(inode->i_rdev)];
+       return aac_do_ioctl(dev, cmd, (void *)arg);
 }
 
 /*
@@ -728,9 +706,13 @@ static int aac_cfg_ioctl(struct inode * inode,  struct file * file, unsigned int
  */
  
 static Scsi_Host_Template driver_template = {
-/*     module:                 THIS_MODULE, */
+#if 0
+       module:                 THIS_MODULE,
+#endif
        name:                   "AAC",
-/*     proc_info:              aac_procinfo, */
+#if 0
+       proc_info:              aac_procinfo,
+#endif
        detect:                 aac_detect,
        release:                aac_release,
        info:                   aac_driverinfo,
@@ -780,11 +762,13 @@ static Scsi_Host_Template driver_template = {
 static int aac_procinfo(char *proc_buffer, char **start_ptr,off_t offset,
                        int bytes_available, int host_no, int write)
 {
-    if(write || offset > 0)
-       return 0;
-    *start_ptr = proc_buffer;
-    return sprintf(proc_buffer, "%s  %d\n", 
-                  "Raid Controller, scsi hba number", host_no);
+       if(write || offset > 0)
+               return 0;
+       *start_ptr = proc_buffer;
+       return sprintf(proc_buffer,
+         "Adaptec Raid Controller %s %s, scsi hba number %d\n",
+         AAC_DRIVER_VERSION, AAC_DRIVER_BUILD_DATE,
+         host_no);
 }
 #endif
 
index 8d4685ba0f0fc6141ff64d4d7108e63a3853a10f..ebb797041ee7c6189f632d9129821918cdc410e5 100644 (file)
  *
  */
 
-#include <xeno/config.h>
-#include <xeno/kernel.h>
-#include <xeno/init.h>
-#include <xeno/types.h>
-#include <xeno/sched.h>
-#include <xeno/pci.h>
-/*  #include <xeno/spinlock.h> */
-/*  #include <xeno/slab.h> */
-#include <xeno/blk.h>
-#include <xeno/delay.h>
-/*  #include <xeno/completion.h> */
-/*  #include <asm/semaphore.h> */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/blk.h>
+#include <linux/delay.h>
+/*#include <linux/completion.h>*/
+/*#include <asm/semaphore.h>*/
 #include "scsi.h"
 #include "hosts.h"
 
 
 static void aac_rx_intr(int irq, void *dev_id, struct pt_regs *regs)
 {
-    struct aac_dev *dev = dev_id;
-    unsigned long bellbits;
-    u8 intstat, mask;
-    intstat = rx_readb(dev, MUnit.OISR);
-    /*
-     * Read mask and invert because drawbridge is reversed.
-     * This allows us to only service interrupts that have 
-     * been enabled.
-     */
-    mask = ~(rx_readb(dev, MUnit.OIMR));
-    /* Check to see if this is our interrupt.  If it isn't just return */
-    
-    if (intstat & mask) 
-    {
-       bellbits = rx_readl(dev, OutboundDoorbellReg);
-       if (bellbits & DoorBellPrintfReady) {
-           aac_printf(dev, le32_to_cpu(rx_readl (dev, IndexRegs.Mailbox[5])));
-           rx_writel(dev, MUnit.ODR,DoorBellPrintfReady);
-           rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
-       }
-       else if (bellbits & DoorBellAdapterNormCmdReady) {
-           aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
-           rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
-       }
-       else if (bellbits & DoorBellAdapterNormRespReady) {
-           aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
-           rx_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady);
-       }
-       else if (bellbits & DoorBellAdapterNormCmdNotFull) {
-           rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
-       }
-       else if (bellbits & DoorBellAdapterNormRespNotFull) {
-           rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
-           rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
+       struct aac_dev *dev = dev_id;
+       unsigned long bellbits;
+       u8 intstat, mask;
+       intstat = rx_readb(dev, MUnit.OISR);
+       /*
+        *      Read mask and invert because drawbridge is reversed.
+        *      This allows us to only service interrupts that have 
+        *      been enabled.
+        */
+       mask = ~(rx_readb(dev, MUnit.OIMR));
+       /* Check to see if this is our interrupt.  If it isn't just return */
+       if (intstat & mask) 
+       {
+               bellbits = rx_readl(dev, OutboundDoorbellReg);
+               if (bellbits & DoorBellPrintfReady) {
+                       aac_printf(dev, le32_to_cpu(rx_readl (dev, IndexRegs.Mailbox[5])));
+                       rx_writel(dev, MUnit.ODR,DoorBellPrintfReady);
+                       rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
+               }
+               else if (bellbits & DoorBellAdapterNormCmdReady) {
+                       aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
+                       rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
+               }
+               else if (bellbits & DoorBellAdapterNormRespReady) {
+                       aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
+                       rx_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady);
+               }
+               else if (bellbits & DoorBellAdapterNormCmdNotFull) {
+                       rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
+               }
+               else if (bellbits & DoorBellAdapterNormRespNotFull) {
+                       rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
+                       rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
+               }
        }
-    }
 }
 
 /**
@@ -95,24 +94,24 @@ static void aac_rx_intr(int irq, void *dev_id, struct pt_regs *regs)
  
 static void aac_rx_enable_interrupt(struct aac_dev * dev, u32 event)
 {
-    switch (event) {
-       
-    case HostNormCmdQue:
-       dev->irq_mask &= ~(OUTBOUNDDOORBELL_1);
-       break;
-       
-    case HostNormRespQue:
-       dev->irq_mask &= ~(OUTBOUNDDOORBELL_2);
-       break;
-       
-    case AdapNormCmdNotFull:
-       dev->irq_mask &= ~(OUTBOUNDDOORBELL_3);
-       break;
-       
-    case AdapNormRespNotFull:
-       dev->irq_mask &= ~(OUTBOUNDDOORBELL_4);
-       break;
-    }
+       switch (event) {
+
+       case HostNormCmdQue:
+               dev->irq_mask &= ~(OUTBOUNDDOORBELL_1);
+               break;
+
+       case HostNormRespQue:
+               dev->irq_mask &= ~(OUTBOUNDDOORBELL_2);
+               break;
+
+       case AdapNormCmdNotFull:
+               dev->irq_mask &= ~(OUTBOUNDDOORBELL_3);
+               break;
+
+       case AdapNormRespNotFull:
+               dev->irq_mask &= ~(OUTBOUNDDOORBELL_4);
+               break;
+       }
 }
 
 /**
@@ -125,24 +124,24 @@ static void aac_rx_enable_interrupt(struct aac_dev * dev, u32 event)
 
 static void aac_rx_disable_interrupt(struct aac_dev *dev, u32 event)
 {
-    switch (event) {
-       
-    case HostNormCmdQue:
-       dev->irq_mask |= (OUTBOUNDDOORBELL_1);
-       break;
-       
-    case HostNormRespQue:
-       dev->irq_mask |= (OUTBOUNDDOORBELL_2);
-       break;
-       
-    case AdapNormCmdNotFull:
-       dev->irq_mask |= (OUTBOUNDDOORBELL_3);
-       break;
-       
-    case AdapNormRespNotFull:
-       dev->irq_mask |= (OUTBOUNDDOORBELL_4);
-       break;
-    }
+       switch (event) {
+
+       case HostNormCmdQue:
+               dev->irq_mask |= (OUTBOUNDDOORBELL_1);
+               break;
+
+       case HostNormRespQue:
+               dev->irq_mask |= (OUTBOUNDDOORBELL_2);
+               break;
+
+       case AdapNormCmdNotFull:
+               dev->irq_mask |= (OUTBOUNDDOORBELL_3);
+               break;
+
+       case AdapNormRespNotFull:
+               dev->irq_mask |= (OUTBOUNDDOORBELL_4);
+               break;
+       }
 }
 
 /**
@@ -158,91 +157,89 @@ static void aac_rx_disable_interrupt(struct aac_dev *dev, u32 event)
 
 static int rx_sync_cmd(struct aac_dev *dev, u32 command, u32 p1, u32 *status)
 {
-    unsigned long start;
-    int ok;
-    /*
-     * Write the command into Mailbox 0
-     */
-    rx_writel(dev, InboundMailbox0, cpu_to_le32(command));
-    /*
-     * Write the parameters into Mailboxes 1 - 4
-     */
-    rx_writel(dev, InboundMailbox1, cpu_to_le32(p1));
-    rx_writel(dev, InboundMailbox2, 0);
-    rx_writel(dev, InboundMailbox3, 0);
-    rx_writel(dev, InboundMailbox4, 0);
-    /*
-     * Clear the synch command doorbell to start on a clean slate.
-     */
-    rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
-    /*
-     * Disable doorbell interrupts
-     */
-    rx_writeb(dev, MUnit.OIMR, rx_readb(dev, MUnit.OIMR) | 0x04);
-    /*
-     * Force the completion of the mask register write before issuing
-     * the interrupt.
-     */
-    rx_readb (dev, MUnit.OIMR);
-    /*
-     * Signal that there is a new synch command
-     */
-    rx_writel(dev, InboundDoorbellReg, INBOUNDDOORBELL_0);
-    
-    ok = 0;
-    start = jiffies;
-    
-    /*
-     * Wait up to 30 seconds
-     */
-    while (time_before(jiffies, start+30*HZ)) 
-    {
-       /* Delay 5 microseconds to let Mon960 get info. */
-       udelay(5);      
+       unsigned long start;
+       int ok;
        /*
-        *      Mon960 will set doorbell0 bit when its completed the command.
+        *      Write the command into Mailbox 0
         */
-       if (rx_readl(dev, OutboundDoorbellReg) & OUTBOUNDDOORBELL_0) {
-           /*
-            *  Clear the doorbell.
-            */
-           rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
-           ok = 1;
-           break;
-       }
-#if 0
+       rx_writel(dev, InboundMailbox0, cpu_to_le32(command));
        /*
-        *      Yield the processor in case we are slow 
+        *      Write the parameters into Mailboxes 1 - 4
         */
-       set_current_state(TASK_UNINTERRUPTIBLE);
-       schedule_timeout(1);
-#else 
-       /* XXX SMH: not in xen we don't */
-       mdelay(50); 
+       rx_writel(dev, InboundMailbox1, cpu_to_le32(p1));
+       rx_writel(dev, InboundMailbox2, 0);
+       rx_writel(dev, InboundMailbox3, 0);
+       rx_writel(dev, InboundMailbox4, 0);
+       /*
+        *      Clear the synch command doorbell to start on a clean slate.
+        */
+       rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
+       /*
+        *      Disable doorbell interrupts
+        */
+       rx_writeb(dev, MUnit.OIMR, rx_readb(dev, MUnit.OIMR) | 0x04);
+       /*
+        *      Force the completion of the mask register write before issuing
+        *      the interrupt.
+        */
+       rx_readb (dev, MUnit.OIMR);
+       /*
+        *      Signal that there is a new synch command
+        */
+       rx_writel(dev, InboundDoorbellReg, INBOUNDDOORBELL_0);
+
+       ok = 0;
+       start = jiffies;
+
+       /*
+        *      Wait up to 30 seconds
+        */
+       while (time_before(jiffies, start+30*HZ)) 
+       {
+               udelay(5);      /* Delay 5 microseconds to let Mon960 get info. */
+               /*
+                *      Mon960 will set doorbell0 bit when it has completed the command.
+                */
+               if (rx_readl(dev, OutboundDoorbellReg) & OUTBOUNDDOORBELL_0) {
+                       /*
+                        *      Clear the doorbell.
+                        */
+                       rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
+                       ok = 1;
+                       break;
+               }
+#if 0
+               /*
+                *      Yield the processor in case we are slow 
+                */
+               set_current_state(TASK_UNINTERRUPTIBLE);
+               schedule_timeout(1);
+#else
+               /* XXX SMH: not in xen we don't */
+               mdelay(50); 
 #endif
-       
-    }
-    if (ok != 1) {
+       }
+       if (ok != 1) {
+               /*
+                *      Restore interrupt mask even though we timed out
+                */
+               rx_writeb(dev, MUnit.OIMR, rx_readl(dev, MUnit.OIMR) & 0xfb);
+               return -ETIMEDOUT;
+       }
+       /*
+        *      Pull the synch status from Mailbox 0.
+        */
+       *status = le32_to_cpu(rx_readl(dev, IndexRegs.Mailbox[0]));
        /*
-        *      Restore interrupt mask even though we timed out
+        *      Clear the synch command doorbell.
+        */
+       rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
+       /*
+        *      Restore interrupt mask
         */
        rx_writeb(dev, MUnit.OIMR, rx_readl(dev, MUnit.OIMR) & 0xfb);
-       return -ETIMEDOUT;
-    }
-    /*
-     * Pull the synch status from Mailbox 0.
-     */
-    *status = le32_to_cpu(rx_readl(dev, IndexRegs.Mailbox[0]));
-    /*
-     * Clear the synch command doorbell.
-     */
-    rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
-    /*
-     * Restore interrupt mask
-     */
-    rx_writeb(dev, MUnit.OIMR, rx_readl(dev, MUnit.OIMR) & 0xfb);
-    return 0;
-    
+       return 0;
+
 }
 
 /**
@@ -254,8 +251,8 @@ static int rx_sync_cmd(struct aac_dev *dev, u32 command, u32 p1, u32 *status)
 
 static void aac_rx_interrupt_adapter(struct aac_dev *dev)
 {
-    u32 ret;
-    rx_sync_cmd(dev, BREAKPOINT_REQUEST, 0, &ret);
+       u32 ret;
+       rx_sync_cmd(dev, BREAKPOINT_REQUEST, 0, &ret);
 }
 
 /**
@@ -269,33 +266,33 @@ static void aac_rx_interrupt_adapter(struct aac_dev *dev)
 
 static void aac_rx_notify_adapter(struct aac_dev *dev, u32 event)
 {
-    switch (event) {
-       
-    case AdapNormCmdQue:
-       rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_1);
-       break;
-    case HostNormRespNotFull:
-       rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_4);
-       break;
-    case AdapNormRespQue:
-       rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_2);
-       break;
-    case HostNormCmdNotFull:
-       rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_3);
-       break;
-    case HostShutdown:
+       switch (event) {
+
+       case AdapNormCmdQue:
+               rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_1);
+               break;
+       case HostNormRespNotFull:
+               rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_4);
+               break;
+       case AdapNormRespQue:
+               rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_2);
+               break;
+       case HostNormCmdNotFull:
+               rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_3);
+               break;
+       case HostShutdown:
 //             rx_sync_cmd(dev, HOST_CRASHING, 0, 0, 0, 0, &ret);
-       break;
-    case FastIo:
-       rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_6);
-       break;
-    case AdapPrintfDone:
-       rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_5);
-       break;
-    default:
-       BUG();
-       break;
-    }
+               break;
+       case FastIo:
+               rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_6);
+               break;
+       case AdapPrintfDone:
+               rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_5);
+               break;
+       default:
+               BUG();
+               break;
+       }
 }
 
 /**
@@ -307,31 +304,27 @@ static void aac_rx_notify_adapter(struct aac_dev *dev, u32 event)
 
 static void aac_rx_start_adapter(struct aac_dev *dev)
 {
-    u32 status;
-    struct aac_init *init;
-    
-    init = dev->init;
-
-    init->HostElapsedSeconds = cpu_to_le32(jiffies/HZ);
-    /*
-     * Tell the adapter we are back and up and running so it will scan
-     * its command queues and enable our interrupts
-     */
-    dev->irq_mask = (DoorBellPrintfReady | OUTBOUNDDOORBELL_1 | 
-                    OUTBOUNDDOORBELL_2 | OUTBOUNDDOORBELL_3 | 
-                    OUTBOUNDDOORBELL_4);
-    /*
-     * First clear out all interrupts.  Then enable the one's that we
-     * can handle.
-     */
-    rx_writeb(dev, MUnit.OIMR, 0xff);
-    rx_writel(dev, MUnit.ODR, 0xffffffff);
+       u32 status;
+       struct aac_init *init;
+
+       init = dev->init;
+       init->HostElapsedSeconds = cpu_to_le32(jiffies/HZ);
+       /*
+        *      Tell the adapter we are back and up and running so it will scan
+        *      its command queues and enable our interrupts
+        */
+       dev->irq_mask = (DoorBellPrintfReady | OUTBOUNDDOORBELL_1 | OUTBOUNDDOORBELL_2 | OUTBOUNDDOORBELL_3 | OUTBOUNDDOORBELL_4);
+       /*
+        *      First clear out all interrupts.  Then enable the one's that we
+        *      can handle.
+        */
+       rx_writeb(dev, MUnit.OIMR, 0xff);
+       rx_writel(dev, MUnit.ODR, 0xffffffff);
 //     rx_writeb(dev, MUnit.OIMR, ~(u8)OUTBOUND_DOORBELL_INTERRUPT_MASK);
-    rx_writeb(dev, MUnit.OIMR, 0xfb);
-    
-    // We can only use a 32 bit address here
-    rx_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, 
-               (u32)(ulong)dev->init_pa, &status);
+       rx_writeb(dev, MUnit.OIMR, 0xfb);
+
+       // We can only use a 32 bit address here
+       rx_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa, &status);
 }
 
 /**
@@ -346,112 +339,91 @@ static void aac_rx_start_adapter(struct aac_dev *dev)
 
 int aac_rx_init(struct aac_dev *dev, unsigned long num)
 {
-    unsigned long start;
-    unsigned long status;
-    int instance;
-    const char * name;
-    
-    dev->devnum = num;
-    instance = dev->id;
-    name     = dev->name;
-    
-    dprintk((KERN_ERR "aac_rx_init called, num %ld, scsi host ptr = %p\n", 
-            num, (void *)(dev->scsi_host_ptr))); 
-    
-    dprintk((KERN_ERR "scsi_host_ptr->base is %p\n", 
-            (void *)dev->scsi_host_ptr->base)); 
-    /*
-     * Map in the registers from the adapter.
-     */
-    if((dev->regs.rx = (struct rx_registers *)
-       ioremap((unsigned long)dev->scsi_host_ptr->base, 8192))==NULL)
-    {  
-       printk(KERN_WARNING "aacraid: unable to map i960.\n" );
-       return -1;
-    }
-    
-//     dprintk((KERN_ERR "aac_rx_init: AAA\n")); 
-    /*
-     * Check to see if the board failed any self tests.
-     */
-    if (rx_readl(dev, IndexRegs.Mailbox[7]) & SELF_TEST_FAILED) {
-       printk(KERN_ERR "%s%d: adapter self-test failed.\n", 
-              dev->name, instance);
-       return -1;
-    }
-    
-    
-//     dprintk((KERN_ERR "aac_rx_init: BBB\n")); 
-    /*
-     * Check to see if the board panic'd while booting.
-     */
-    if (rx_readl(dev, IndexRegs.Mailbox[7]) & KERNEL_PANIC) {
-       printk(KERN_ERR "%s%d: adapter kernel panic'd.\n", 
-              dev->name, instance);
-       return -1;
-    }
-    start = jiffies;
-    
-//     dprintk((KERN_ERR "aac_rx_init: DDD\n")); 
-    /*
-     * Wait for the adapter to be up and running. Wait up to 3 minutes
-     */
-    while (!(rx_readl(dev, IndexRegs.Mailbox[7]) & KERNEL_UP_AND_RUNNING)) 
-    {
-       if(time_after(jiffies, start+180*HZ))
-       {
-           status = rx_readl(dev, IndexRegs.Mailbox[7]) >> 16;
-           printk(KERN_ERR "%s%d: adapter kernel failed to start,"
-                  "init status = %ld.\n", dev->name, 
-                  instance, status);
-           return -1;
+       unsigned long start;
+       unsigned long status;
+       int instance;
+       const char * name;
+
+       dev->devnum = num;
+       instance = dev->id;
+       name     = dev->name;
+
+       /*
+        *      Map in the registers from the adapter.
+        */
+       if((dev->regs.rx = (struct rx_registers *)ioremap((unsigned long)dev->scsi_host_ptr->base, 8192))==NULL)
+       {       
+               printk(KERN_WARNING "aacraid: unable to map i960.\n" );
+               return -1;
+       }
+       /*
+        *      Check to see if the board failed any self tests.
+        */
+       if (rx_readl(dev, IndexRegs.Mailbox[7]) & SELF_TEST_FAILED) {
+               printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
+               return -1;
        }
-// dprintk((KERN_ERR "aac_rx_init: XXX\n")); 
-       
-#if 0 
-       set_current_state(TASK_UNINTERRUPTIBLE);
-       schedule_timeout(1);
+       /*
+        *      Check to see if the board panic'd while booting.
+        */
+       if (rx_readl(dev, IndexRegs.Mailbox[7]) & KERNEL_PANIC) {
+               printk(KERN_ERR "%s%d: adapter kernel panic'd.\n", dev->name, instance);
+               return -1;
+       }
+       start = jiffies;
+       /*
+        *      Wait for the adapter to be up and running. Wait up to 3 minutes
+        */
+       while (!(rx_readl(dev, IndexRegs.Mailbox[7]) & KERNEL_UP_AND_RUNNING)) 
+       {
+               if(time_after(jiffies, start+180*HZ))
+               {
+                       status = rx_readl(dev, IndexRegs.Mailbox[7]) >> 16;
+                       printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %ld.\n", dev->name, instance, status);
+                       return -1;
+               }
+#if 0
+               set_current_state(TASK_UNINTERRUPTIBLE);
+               schedule_timeout(1);
 #else
-       /* XXX SMH: no sleeping for us (we're the xen idle task) */
-       mdelay(50); 
+               /* XXX SMH: no sleeping for us (we're the xen idle task) */
+               mdelay(50); 
 #endif
-       
-    }
-    
-//     dprintk((KERN_ERR "aac_rx_init: ZZZ!\n")); 
-    if (request_irq(dev->scsi_host_ptr->irq, aac_rx_intr, 
-                   SA_SHIRQ|SA_INTERRUPT, "aacraid", (void *)dev) < 0) 
-    {
-       printk(KERN_ERR "%s%d: Interrupt unavailable.\n", 
-              name, instance);
-       return -1;
-    }
-    /*
-     * Fill in the function dispatch table.
-     */
-    dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter;
-    dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt;
-    dev->a_ops.adapter_disable_int = aac_rx_disable_interrupt;
-    dev->a_ops.adapter_notify = aac_rx_notify_adapter;
-    dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
-    
-    if (aac_init_adapter(dev) == NULL)
-       return -1;
+       }
+       if (request_irq(dev->scsi_host_ptr->irq, aac_rx_intr, SA_SHIRQ|SA_INTERRUPT, "aacraid", (void *)dev)<0) 
+       {
+               printk(KERN_ERR "%s%d: Interrupt unavailable.\n", name, instance);
+               return -1;
+       }
+       /*
+        *      Fill in the function dispatch table.
+        */
+       dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter;
+       dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt;
+       dev->a_ops.adapter_disable_int = aac_rx_disable_interrupt;
+       dev->a_ops.adapter_notify = aac_rx_notify_adapter;
+       dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
+
+       if (aac_init_adapter(dev) == NULL)
+               return -1;
 #ifdef TRY_TASKLET
-    aac_command_tasklet.data = (unsigned long)dev;
-    tasklet_enable(&aac_command_tasklet);
+       aac_command_tasklet.data = (unsigned long)dev;
+       tasklet_enable(&aac_command_tasklet);
 #else
-    /*
-     * Start any kernel threads needed
-     */
-    dev->thread_pid = kernel_thread((int (*)(void *))aac_command_thread, 
-                                   dev, 0);
-#endif 
-
-    /*
-     * Tell the adapter that all is configured, and it can start
-     * accepting requests
-     */
-    aac_rx_start_adapter(dev);
-    return 0;
+       /*
+        *      Start any kernel threads needed
+        */
+       dev->thread_pid = kernel_thread((int (*)(void *))aac_command_thread, dev, 0);
+       if(dev->thread_pid < 0)
+       {
+               printk(KERN_ERR "aacraid: Unable to create rx thread.\n");
+               return -1;
+       }       
+#endif
+       /*
+        *      Tell the adapter that all is configured, and it can start
+        *      accepting requests
+        */
+       aac_rx_start_adapter(dev);
+       return 0;
 }
index edb5679d417989fffaf270d5f9caa79ba2b6717b..9a453176dfabb7c4be6222782a475411b226917b 100644 (file)
  *
  */
 
-#include <xeno/config.h>
-#include <xeno/kernel.h>
-#include <xeno/init.h>
-#include <xeno/types.h>
-#include <xeno/sched.h>
-/*  #include <xeno/pci.h> */
-/*  #include <xeno/spinlock.h> */
-/*  #include <xeno/slab.h> */
-#include <xeno/blk.h>
-#include <xeno/delay.h>
-/*  #include <xeno/completion.h> */
-/*  #include <asm/semaphore.h> */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/blk.h>
+#include <linux/delay.h>
+/*#include <linux/completion.h>*/
+/*#include <asm/semaphore.h>*/
 #include "scsi.h"
 #include "hosts.h"
 
@@ -235,9 +235,9 @@ static int sa_sync_cmd(struct aac_dev *dev, u32 command, u32 p1, u32 *ret)
 #if 0
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(1);
+#else
+               mdelay(100);
 #endif
-               mdelay(100); 
-
        }
 
        if (ok != 1)
@@ -353,7 +353,7 @@ int aac_sa_init(struct aac_dev *dev, unsigned long devnum)
         *      Wait for the adapter to be up and running. Wait up to 3 minutes.
         */
        while (!(sa_readl(dev, Mailbox7) & KERNEL_UP_AND_RUNNING)) {
-               if (time_after(start+180*HZ, jiffies)) {
+               if (time_after(jiffies, start+180*HZ)) {
                        status = sa_readl(dev, Mailbox7) >> 16;
                        printk(KERN_WARNING "%s%d: adapter kernel failed to start, init status = %d.\n", name, instance, le32_to_cpu(status));
                        return -1;
@@ -361,8 +361,9 @@ int aac_sa_init(struct aac_dev *dev, unsigned long devnum)
 #if 0
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(1);
+#else
+               mdelay(100);
 #endif
-               mdelay(100); 
        }
 
        dprintk(("ATIRQ\n"));
@@ -392,8 +393,11 @@ int aac_sa_init(struct aac_dev *dev, unsigned long devnum)
         *      Start any kernel threads needed
         */
        dev->thread_pid = kernel_thread((int (*)(void *))aac_command_thread, dev, 0);
+       if (dev->thread_pid < 0) {
+            printk(KERN_ERR "aacraid: Unable to create command thread.\n");
+            return -1;
+       }
 #endif
-
        /*
         *      Tell the adapter that all is configure, and it can start 
         *      accepting requests